demo version

This commit is contained in:
Josip Milovac 2023-07-13 11:32:02 +10:00
parent fbb282a801
commit 672d6daa8e
125 changed files with 17918 additions and 1481 deletions

13
.eslintrc.json Normal file
View file

@ -0,0 +1,13 @@
{
"env": {
"browser": true,
"commonjs": true,
"es2021": true
},
//"extends": "google",
"parserOptions": {
"ecmaVersion": "latest"
},
"rules": {
}
}

3
.gitignore vendored
View file

@ -6,3 +6,6 @@
/node_modules
/package-lock.json
/.DS_Store
/demo/camera_client/out/build
/demo/camera_client/lib64
/demo/out/build

View file

@ -1,9 +1,14 @@
const ChainUtil = require('../chain-util');
const { DIFFICULTY, MINE_RATE } = require('../constants');
const ChainUtil = require('../util/chain-util');
const { DIFFICULTY, MINE_RATE } = require('../util/constants');
const BrokerRegistration = require('./broker-registration');
const SensorRegistration = require('./sensor-registration');
const Integration = require('./integration');
const Payment = require('./payment');
const Compensation = require('./compensation');
// Append `prefix` followed by `concatting` to `concatTo`, but only when
// `concatting` is defined and non-empty; otherwise return `concatTo` unchanged.
// Used to build the block's hash input from optional transaction lists.
// NOTE(review): the source contained a second, unreachable `return` using
// `${concatting.signature}` (old/new diff lines fused together). The dead line
// is removed here; confirm against the repository which variant is intended.
function concatIfNotUndefined(concatTo, prefix, concatting) {
  if (typeof concatting !== "undefined" && concatting.length !== 0) {
    return concatTo + `${prefix}${concatting}`;
  } else {
    return concatTo;
  }
}
@ -20,18 +25,24 @@ function getData(block, key) {
}
}
const acceptableMembers = new Set();
acceptableMembers.add("timestamp");
acceptableMembers.add("lastHash");
acceptableMembers.add("hash");
acceptableMembers.add("reward");
acceptableMembers.add("payments");
acceptableMembers.add("sensorRegistrations");
acceptableMembers.add("brokerRegistrations");
acceptableMembers.add("integrations");
acceptableMembers.add("compensations");
acceptableMembers.add("nonce");
acceptableMembers.add("difficulty");
// Per-member validators for a raw block object, consumed by
// ChainUtil.validateObject in Block.verify. Required scalar fields first;
// the transaction lists are optional arrays, each element checked with the
// corresponding transaction type's own verify function.
const baseValidation = {
timestamp: ChainUtil.createValidateIsIntegerWithMin(0),
lastHash: ChainUtil.validateIsString,
hash: ChainUtil.validateIsString,
reward: ChainUtil.validateIsPublicKey,
nonce: ChainUtil.createValidateIsIntegerWithMin(0),
difficulty: ChainUtil.createValidateIsIntegerWithMin(0),
// Optional transaction lists — absent members are accepted.
sensorRegistrations: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(SensorRegistration.verify)),
brokerRegistrations: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(BrokerRegistration.verify)),
integrations: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(Integration.verify)),
compensations: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(Compensation.verify)),
payments: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(Payment.verify))
}
class Block {
constructor(timestamp, lastHash, hash, reward, payments, sensorRegistrations, brokerRegistrations, integrations, compensations, nonce, difficulty) {
@ -185,104 +196,17 @@ class Block {
difficulty);
}
static validateIsBlock(block) {
if (!(block instanceof Object)) {
return {
result: false,
reason: "Is not an object"
};
static verify(block) {
const validationRes = ChainUtil.validateObject(block, baseValidation);
if (!validationRes.result) {
return validationRes;
}
for (const key in block) {
if (!acceptableMembers.has(key)) {
if (!Block.checkHash(block)) {
return {
result: false,
reason: `Block has key not in acceptable members`
};
}
}
if (!("timestamp" in block)) {
return {
result: false,
reason: "Block doesn't have a timestamp"
};
}
const timestampRes = ChainUtil.validateIsIntegerWithMin(block.timestamp, 0);
if (!timestampRes.result) {
return {
result: false,
reason: "Timestamp validation failed: " + timestampRes.reason
};
}
if (!("lastHash" in block)) {
return {
result: false,
reason: "Block doesn't have lastHash"
};
}
const lastHashRes = ChainUtil.validateIsString(block.lastHash);
if (!lastHashRes.result) {
return {
result: false,
reason: "lastHash validation failed: " + lastHashRes.reason
};
}
if (!("hash" in block)) {
return {
result: false,
reason: "Block doesn't have hash"
};
}
const hashRes = ChainUtil.validateIsString(block.hash);
if (!hashRes.result) {
return {
result: false,
reason: "hash validation failed: " + hashRes.reason
};
}
if (!("reward" in block)) {
return {
result: false,
reason: "Block doesn't have reward"
};
}
const rewardRes = ChainUtil.validateIsPublicKey(block.reward);
if (!rewardRes.result) {
return {
result: false,
reason: "reward validation failed: " + rewardRes.reason
};
}
if (!("nonce" in block)) {
return {
result: false,
reason: "Block doesn't have nonce"
};
}
const nonceRes = ChainUtil.validateIsIntegerWithMin(block.nonce);
if (!nonceRes.result) {
return {
result: false,
reason: "nonce validation failed: " + nonceRes.reason
};
}
if (!("difficulty" in block)) {
return {
result: false,
reason: "Block doesn't have difficulty"
};
}
const difficultyRes = ChainUtil.validateIsIntegerWithMin(block.difficulty);
if (!difficultyRes.result) {
return {
result: false,
reason: "difficulty validation failed: " + difficultyRes.reason
reason: "Couldn't verify hash"
};
}

View file

@ -6,10 +6,13 @@ const BrokerRegistration = require('./broker-registration');
const Integration = require('./integration');
const Compensation = require('./compensation');
const fs = require('fs');
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const RdsStore = require('./rds-store');
const {
MINING_REWARD} = require('../constants');
MINING_REWARD,
SENSHAMART_URI_REPLACE } = require('../util/constants');
const URIS = require('./uris');
function makeIntegrationKey(publicKey, counter) {
return `${publicKey}/${counter}`;
@ -80,7 +83,9 @@ class PropertyHistory {
throw new Error("Finishing Property History with null backing");
}
this.backing.undos.push(...this.undos);
for (const undo of this.undos) {
this.backing.undos.push(undo);
}
Object.assign(this.backing.current, this.current);
this.backing = null;
@ -113,6 +118,30 @@ function getPropertyClone(propertyHistory, key, fallback) {
}
}
// Thin wrappers over DataFactory so RDF-quad call sites stay terse.
function namedNode(x) {
return DataFactory.namedNode(x);
}
function literal(x) {
return DataFactory.literal(x);
}
// URI for a block node, keyed by the block's hash.
function makeBlockName(block) {
return URIS.OBJECT.BLOCK + '/' + block.hash;
}
// URI for a sensor-registration transaction, keyed by its signable hash.
function makeSensorTransactionName(sensorRegistration) {
return URIS.OBJECT.SENSOR_REGISTRATION + '/' + SensorRegistration.hashToSign(sensorRegistration);
}
// URI for a broker-registration transaction, keyed by its signable hash.
function makeBrokerTransactionName(brokerRegistration) {
return URIS.OBJECT.BROKER_REGISTRATION + '/' + BrokerRegistration.hashToSign(brokerRegistration);
}
// URI for a wallet node; `input` is the transaction's input public key.
function makeWalletName(input) {
return URIS.OBJECT.WALLET + '/' + input;
}
class Updater {
constructor(parent, block) {
this.parent = parent;
@ -126,20 +155,23 @@ class Updater {
this.store.startPush();
if (block !== null) {
this.store.push(
DataFactory.quad(
DataFactory.namedNode(this.block.hash),
DataFactory.namedNode("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
DataFactory.namedNode("http://SSM/Block")));
this.pushQuad(
namedNode(makeBlockName(this.block)),
namedNode(URIS.PREDICATE.TYPE),
namedNode(URIS.OBJECT.BLOCK));
this.store.push(
DataFactory.quad(
DataFactory.namedNode(this.block.hash),
DataFactory.namedNode("http://SSM/lastBlock"),
DataFactory.namedNode(this.parent.getBlockFromTop(0).hash)));
this.pushQuad(
namedNode(makeBlockName(this.block.hash)),
namedNode(URIS.PREDICATE.LAST_BLOCK),
namedNode(makeBlockName(this.parent.getBlockFromTop(0))));
}
}
pushQuad(subject, predicate, object) {
this.store.push(
DataFactory.quad(subject, predicate, object));
}
getBalanceCopy(publicKey) {
if (publicKey in this.balances) {
return Object.assign({}, this.balances[publicKey]);
@ -177,9 +209,9 @@ class Updater {
}
getBrokerPublicKeys() {
const keys = this.parent.getBrokerPublicKeysSet();
const keys = this.parent.getBrokerKeysSet();
for (const [key, value] of this.brokers) {
for (const [key, value] of Object.entries(this.brokers)) {
keys.add(value.input);
}
@ -339,7 +371,9 @@ class Chain {
throw new Error("Finishing Blockchain Metadata with null parent");
}
this.parent.blocks.push(...this.blocks);
for (const block of this.blocks) {
this.parent.blocks.push(block);
}
this.balances.finish();
this.sensors.finish();
this.brokers.finish();
@ -349,13 +383,30 @@ class Chain {
}
}
function addRDF(store, metadata) {
// Expand the reserved SENSHAMART_URI_REPLACE prefix into the sensor's own
// name; strings without the prefix pass through untouched.
function uriReplacePrefix(testing, sensorName) {
  if (!testing.startsWith(SENSHAMART_URI_REPLACE)) {
    return testing;
  }
  return sensorName + testing.substring(SENSHAMART_URI_REPLACE.length);
}
function addNodeRDF(updater, metadata, sensorName) {
for (const triple of metadata) {
store.push(
DataFactory.quad(
DataFactory.namedNode(triple.s),
DataFactory.namedNode(triple.p),
DataFactory.namedNode(triple.o)));
updater.pushQuad(
namedNode(uriReplacePrefix(triple.s, sensorName)),
namedNode(uriReplacePrefix(triple.p, sensorName)),
namedNode(uriReplacePrefix(triple.o, sensorName)));
}
}
// Push each (s, p, o) triple onto the updater's store, expanding the
// reserved URI prefix in every position; the object term is pushed as an
// RDF literal rather than a named node.
function addLiteralRDF(updater, metadata, sensorName) {
  for (const { s, p, o } of metadata) {
    updater.pushQuad(
      namedNode(uriReplacePrefix(s, sensorName)),
      namedNode(uriReplacePrefix(p, sensorName)),
      literal(o));
  }
}
@ -442,7 +493,7 @@ function stepIntegration(updater, reward, integration) {
inputBalance.balance -= integration.rewardAmount;
for (const output of integration.outputs) {
const foundSensor = updater.getSensorCopy(output.sensor);
const foundSensor = updater.getSensorCopy(output.sensorName);
if (foundSensor === null) {
return {
@ -450,12 +501,29 @@ function stepIntegration(updater, reward, integration) {
reason: `Integration references non-existant sensor: ${output.sensor}`
};
}
if (foundSensor.counter !== output.counter) {
if (SensorRegistration.hashToSign(foundSensor) !== output.sensorHash) {
return {
result: false,
reason: "Integration references non-current version of sensor"
};
}
const foundBroker = updater.getBrokerCopy(SensorRegistration.getIntegrationBroker(foundSensor));
if (foundBroker === null) {
return {
result: false,
reason: "Internal consitency error, can't find broker referenced by commited sensor registration"
};
}
if (BrokerRegistration.hashToSign(foundBroker) !== output.brokerHash) {
return {
result: false,
reason: "Integration references non-current version of sensor's broker"
};
}
if (inputBalance.balance < output.amount) {
return {
result: false,
@ -471,14 +539,21 @@ function stepIntegration(updater, reward, integration) {
updater.setBalance(reward, rewardBalance);
const integrationCopy = Object.assign({}, integration);
const brokers = updater.getBrokerKeys();
const brokers = updater.getBrokerPublicKeys();
const witnesses = Integration.chooseWitnesses(integration, brokers);
if (!witnesses.result) {
return {
result: false,
reason: "Couldn't choose witnesses: " + witnesses.reason
};
}
integrationCopy.witnesses = {};
integrationCopy.compensationCount = 0;
for (const witness of witnesses) {
for (const witness of witnesses.witnesses) {
integrationCopy.witnesses[witness] = false;
}
@ -567,16 +642,7 @@ function stepSensorRegistration(updater, reward, sensorRegistration) {
};
}
const extInfo = SensorRegistration.getExtInformation(sensorRegistration);
if (!extInfo.result) {
return {
result: false,
reason: "Couldn't get sensor registration ext information: " + extInfo.reason
};
}
const foundBroker = updater.getBrokerCopy(extInfo.metadata.integrationBroker);
const foundBroker = updater.getBrokerCopy(SensorRegistration.getIntegrationBroker(sensorRegistration));
if (foundBroker === null) {
return {
@ -609,32 +675,65 @@ function stepSensorRegistration(updater, reward, sensorRegistration) {
rewardBalance.balance += sensorRegistration.rewardAmount;
updater.setBalance(reward, rewardBalance);
addRDF(updater.store, sensorRegistration.metadata);
const sensorName = SensorRegistration.getSensorName(sensorRegistration);
const newSensor = extInfo.metadata;
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(newSensor.sensorName),
DataFactory.namedNode("http://SSM/transactionCounter"),
DataFactory.literal(sensorRegistration.counter)));
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(newSensor.sensorName),
DataFactory.namedNode("http://SSM/OwnedBy"),
DataFactory.namedNode("http://SSM/Wallet/" + sensorRegistration.input)));
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(updater.block.hash),
DataFactory.namedNode("http://SSM/Transaction"),
DataFactory.namedNode(newSensor.sensorName)));
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(updater.block.hash),
DataFactory.namedNode("http://SSM/SensorRegistration"),
DataFactory.namedNode(newSensor.sensorName)));
const foundExistingSensor = updater.getSensorCopy(sensorName);
newSensor.counter = sensorRegistration.counter;
updater.setSensor(newSensor.sensorName, newSensor);
if (foundExistingSensor !== null) {
if(foundExistingSensor.input !== sensorRegistration.input) {
return {
result: false,
reason: "A sensor has already been defined with this name"
};
}
}
addNodeRDF(updater, SensorRegistration.getExtraNodeMetadata(sensorRegistration), sensorName);
addLiteralRDF(updater, SensorRegistration.getExtraLiteralMetadata(sensorRegistration), sensorName);
const transactionName = makeSensorTransactionName(sensorRegistration);
if (updater.block !== null) {
updater.pushQuad(
namedNode(makeBlockName(updater.block)),
namedNode(URIS.PREDICATE.CONTAINS_TRANSACTION),
namedNode(transactionName));
updater.pushQuad(
namedNode(makeBlockName(updater.block)),
namedNode(URIS.PREDICATE.CONTAINS_SENSOR_REGISTRATION),
namedNode(transactionName));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.TYPE),
namedNode(URIS.OBJECT.SENSOR_REGISTRATION));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.HAS_COUNTER),
literal(sensorRegistration.counter));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.IS_OWNED_BY),
namedNode(makeWalletName(sensorRegistration.input)));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.DEFINES),
namedNode(sensorName));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.COSTS_PER_MINUTE),
literal(SensorRegistration.getCostPerMinute(sensorRegistration)));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.COSTS_PER_KB),
literal(SensorRegistration.getCostPerKB(sensorRegistration)));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.USES_BROKER),
namedNode(makeBrokerTransactionName(foundBroker)));
}
updater.setSensor(sensorName, sensorRegistration);
return {
result: true
@ -650,15 +749,6 @@ function stepBrokerRegistration(updater, reward, brokerRegistration) {
};
}
const extInfo = BrokerRegistration.getExtInformation(brokerRegistration);
if (!extInfo.result) {
return {
result: false,
reason: "Couldn't get broker registration ext information: " + extInfo.reason
};
}
const inputBalance = updater.getBalanceCopy(brokerRegistration.input);
if (brokerRegistration.counter <= inputBalance.counter) {
@ -683,33 +773,56 @@ function stepBrokerRegistration(updater, reward, brokerRegistration) {
rewardBalance.balance += brokerRegistration.rewardAmount;
updater.setBalance(reward, rewardBalance);
addRDF(updater.store, brokerRegistration.metadata);
const brokerName = BrokerRegistration.getBrokerName(brokerRegistration);
const newBroker = extInfo.metadata;
newBroker.input = brokerRegistration.input;
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(newBroker.brokerName),
DataFactory.namedNode("http://SSM/transactionCounter"),
DataFactory.literal(brokerRegistration.counter)));
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(newBroker.brokerName),
DataFactory.namedNode("http://SSM/OwnedBy"),
DataFactory.namedNode("http://SSM/Wallet/" + brokerRegistration.input)));
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(updater.block.hash),
DataFactory.namedNode("http://SSM/Transaction"),
DataFactory.namedNode(newBroker.brokerName)));
updater.store.push(
DataFactory.quad(
DataFactory.namedNode(updater.block.hash),
DataFactory.namedNode("http://SSM/BrokerRegistration"),
DataFactory.namedNode(newBroker.brokerName)));
const foundExistingBroker = updater.getBrokerCopy(brokerName);
newBroker.counter = brokerRegistration.counter;
updater.setBroker(newBroker.brokerName, newBroker);
if (foundExistingBroker !== null) {
if(foundExistingBroker.input !== brokerRegistration.input) {
return {
result: false,
reason: "A broker has already been defined with this name"
};
}
}
addNodeRDF(updater, BrokerRegistration.getExtraNodeMetadata(brokerRegistration), brokerName);
addLiteralRDF(updater, BrokerRegistration.getExtraLiteralMetadata(brokerRegistration), brokerName);
const transactionName = makeBrokerTransactionName(brokerRegistration);
if (updater.block !== null) {
updater.pushQuad(
namedNode(makeBlockName(updater.block)),
namedNode(URIS.PREDICATE.CONTAINS_TRANSACTION),
namedNode(transactionName));
updater.pushQuad(
namedNode(makeBlockName(updater.block)),
namedNode(URIS.PREDICATE.CONTAINS_BROKER_REGISTRATION),
namedNode(transactionName));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.TYPE),
namedNode(URIS.OBJECT.BROKER_REGISTRATION));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.HAS_COUNTER),
literal(brokerRegistration.counter));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.IS_OWNED_BY),
namedNode(makeWalletName(brokerRegistration.input)));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.DEFINES),
namedNode(brokerName));
updater.pushQuad(
namedNode(transactionName),
namedNode(URIS.PREDICATE.HAS_ENDPOINT),
literal(BrokerRegistration.getEndpoint(brokerRegistration)));
}
updater.setBroker(BrokerRegistration.getBrokerName(brokerRegistration), brokerRegistration);
return {
result: true
@ -850,6 +963,8 @@ function findBlocksDifference(oldBlocks, newBlocks) {
const verifyRes = verifyBlockHash(newBlocks[i - 1], newBlocks[i]);
if (!verifyRes.result) {
console.log(`${newBlocks[i - 1].hash}`);
console.log(`${newBlocks[i].lastHash}`);
return {
result: false,
reason: `Couldn't verify hashes for block ${i}: ${verifyRes.reason}`
@ -976,9 +1091,9 @@ class Blockchain {
return true;
}
wouldBeValidBlock(rewardee, payments, sensorRegistrations, brokerRegistrations, integrations) {
wouldBeValidBlock(rewardee, payments, sensorRegistrations, brokerRegistrations, integrations, compensations) {
const updater = this.chain.createUpdater(null);
return verifyTxs(updater, rewardee, payments, sensorRegistrations, brokerRegistrations, integrations).result;
return verifyTxs(updater, rewardee, payments, sensorRegistrations, brokerRegistrations, integrations, compensations).result;
}
static isValidChain(blocks) {
@ -1025,7 +1140,10 @@ class Blockchain {
this.chain = baseChain;
verifyResult.newChain.finish();
onChange(this, this.blocks(), oldChain, chainDifferenceRes.difference);
console.log(`new chain of length: ${this.blocks().length}`);
onChange(this, this.blocks(), oldChain.blocks, chainDifferenceRes.difference);
return {
result: true,

View file

@ -1,4 +1,4 @@
const Blockchain = require('./index');
const Blockchain = require('./blockchain');
const Block = require('./block');
describe('Blockchain', () => {
@ -15,13 +15,13 @@ describe('Blockchain', () => {
it('adds a new block', () => {
const reward = 'test-reward-key';
expect(bc.addBlock(Block.debugMine(bc.lastBlock(),reward,[],[]))).toBe(true);
expect(bc.addBlock(Block.debugMine(bc.lastBlock(),reward))).toBe(true);
expect(bc.lastBlock().reward).toEqual(reward);
});
it('validates a valid chain', () => {
expect(bc2.addBlock(Block.debugMine(bc2.lastBlock(), 'test-reward-key', [], []))).toBe(true);
expect(bc2.addBlock(Block.debugMine(bc2.lastBlock(), 'test-reward-key'))).toBe(true);
expect(Blockchain.isValidChain(bc2.chain)).toBe(true);
});

View file

@ -1,151 +1,91 @@
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const SENSHAMART_URI_PREFIX = require('../util/constants').SENSHAMART_URI_PREFIX;
const tripleValidator = {
s: ChainUtil.validateIsString,
p: ChainUtil.validateIsString,
o: ChainUtil.validateIsString
// A valid RDF term is a string that does not begin with the reserved
// SENSHAMART_URI_PREFIX. Returns a { result, reason? } validation object.
function validateTerm(t) {
  const stringRes = ChainUtil.validateIsString(t);
  if (!stringRes.result) {
    return stringRes;
  }
  return t.startsWith(SENSHAMART_URI_PREFIX)
    ? {
        result: false,
        reason: "Starts with reserved prefix"
      }
    : {
        result: true
      };
}
// A valid RDF literal is either a valid term (non-reserved string) or a
// number. Returns a { result, reason? } validation object.
// NOTE(review): a string that starts with the reserved prefix fails
// validateTerm and then also fails the number check, so the caller sees the
// generic "Wasn't a string or a number" reason instead of the more specific
// reserved-prefix reason — confirm whether that diagnostic loss is intended.
function validateLiteral(t) {
const termRes = validateTerm(t);
// Success short-circuits: a valid term is a valid literal as-is.
if (termRes.result) {
return termRes;
}
const numberRes = ChainUtil.validateIsNumber(t);
if (numberRes.result) {
return numberRes;
}
return {
result: false,
reason: "Wasn't a string or a number"
};
}
const nodeValidator = {
s: validateTerm,
p: validateTerm,
o: validateTerm
};
function validateMetadata(t) {
const literalValidator = {
s: validateTerm,
p: validateTerm,
o: validateLiteral
};
let isBroker = [];
let costPerMinute = [];
let costPerKB = [];
let integrationEndpoint = [];
const validationRes = ChainUtil.validateArray(t, ChainUtil.createValidateObject(tripleValidator));
if (!validationRes.result) {
return validationRes;
}
for (const triple of t) {
switch (triple.p) {
case "http://SSM/Cost_of_Using_IoT_Devices/Cost_Per_Minute": costPerMinute.push(triple); break;
case "http://SSM/Cost_of_Using_IoT_Devices/Cost_Per_Kbyte": costPerKB.push(triple); break;
case "http://www.w3.org/1999/02/22-rdf-syntax-ns#type":
if (triple.o === "http://SSM/Broker") {
isBroker.push(triple.s);
}
break;
case "http://SSM/Integration/Endpoint": integrationEndpoint.push(triple); break;
}
}
if (isBroker.length === 0) {
return {
result: false,
reason: "No broker is defined"
};
} else if (isBroker.length > 1) {
return {
result: false,
reason: "Multiple brokers are defined"
};
}
const brokerName = isBroker[0];
if (costPerMinute.length === 0) {
return {
result: false,
reason: "No cost per minute was defined"
};
} else if (costPerMinute.length > 1) {
return {
result: false,
reason: "Multiple cost per minutes were defined"
}
}
const CostPerMinuteValue = Number.parseInt(costPerMinute[0].o);
if (CostPerMinuteValue === NaN) {
return {
result: false,
reason: "Couldn't parse cost per minute as an integer"
};
} else if (CostPerMinuteValue < 1) {
return {
result: false,
reason: "Cost per minute was negative"
}
} else if (costPerMinute[0].s != brokerName) {
return {
result: false,
reason: "Cost per minute object isn't the broker"
};
}
if (costPerKB.length === 0) {
return {
result: false,
reason: "No cost per KB was defined"
};
} else if (costPerKB.length > 1) {
return {
result: false,
reason: "Multiple cost per KB were defined"
}
}
const CostPerKBValue = Number.parseInt(costPerKB[0].o);
if (CostPerKBValue === NaN) {
return {
result: false,
reason: "Couldn't parse cost per KB as an integer"
};
} else if (CostPerKBValue < 1) {
return {
result: false,
reason: "Cost per KB was negative"
}
} else if (costPerKB[0].s != brokerName) {
return {
result: false,
reason: "Cost per KB object isn't the broker"
};
}
if (integrationEndpoint.length === 0) {
return {
result: false,
reason: "No integration endpoint was defined"
};
} else if (integrationEndpoint.length > 1) {
return {
result: false,
reason: "Multiple integration endpoints were defined"
};
} else if (integrationEndpoint[0].s != brokerName) {
return {
result: false,
reason: "Integration endpoint object isn't the broker"
};
}
return {
result: true,
metadata: {
brokerName: brokerName,
costPerMinute: CostPerMinuteValue,
costPerKB: CostPerKBValue,
integrationEndpoint: integrationEndpoint[0].o
}
};
const metadataValidation = {
name: ChainUtil.validateIsString,
endpoint: ChainUtil.validateIsString,
extraNodes: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(
ChainUtil.createValidateObject(
nodeValidator))),
extraLiterals: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(
ChainUtil.createValidateObject(
literalValidator)))
}
const baseValidation = {
input: ChainUtil.validateIsPublicKey,
counter: ChainUtil.validateIsInteger,
counter: ChainUtil.createValidateIsIntegerWithMin(0),
rewardAmount: ChainUtil.createValidateIsIntegerWithMin(0),
metadata: validateMetadata,
metadata: ChainUtil.createValidateObject(metadataValidation),
signature: ChainUtil.validateIsSignature
};
class BrokerRegistration {
constructor(senderKeyPair, counter, metadata, rewardAmount) {
constructor(senderKeyPair, counter, brokerName, endpoint, nodeMetadata, literalMetadata, rewardAmount) {
this.input = senderKeyPair.getPublic().encode('hex');
this.counter = counter;
this.rewardAmount = rewardAmount;
this.metadata = metadata;
this.metadata = {
name: brokerName,
endpoint: endpoint
};
if (typeof nodeMetadata !== undefined && nodeMetadata !== null) {
this.metadata.extraNodes = nodeMetadata;
};
if (typeof literalMetadata !== undefined && literalMetadata !== null) {
this.metadata.extraLiterals = literalMetadata;
};
this.signature = senderKeyPair.sign(BrokerRegistration.hashToSign(this));
const verification = BrokerRegistration.verify(this);
@ -154,6 +94,30 @@ class BrokerRegistration {
}
}
static getBrokerName(registration) {
return registration.metadata.name;
}
static getEndpoint(registration) {
return registration.metadata.endpoint;
}
static getExtraNodeMetadata(registration) {
if ("extraNodes" in registration.metadata) {
return registration.metadata.extraNodes;
} else {
return [];
}
}
static getExtraLiteralMetadata(registration) {
if ("extraLiterals" in registration.metadata) {
return registration.metadata.extraLiterals;
} else {
return [];
}
}
static hashToSign(registration) {
return ChainUtil.hash([
registration.counter,
@ -173,7 +137,7 @@ class BrokerRegistration {
BrokerRegistration.hashToSign(registration));
if (!signatureRes.result) {
return signatureRes.reason;
return signatureRes;
}
return {
@ -181,8 +145,8 @@ class BrokerRegistration {
};
}
static getExtInformation(registration) {
return validateMetadata(registration.metadata);
static name() {
return "BrokerRegistration";
}
}

View file

@ -0,0 +1,205 @@
const BrokerRegistration = require('./broker-registration');
const ChainUtil = require('../util/chain-util');
const SENSHAMART_URI_PREFIX = require('../util/constants').SENSHAMART_URI_PREFIX;
describe('Broker Registration', () => {
let keyPair;
beforeEach(() => {
keyPair = ChainUtil.genKeyPair();
});
it("Construct a broker", () => {
new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [], 0);
});
it("Construct a broker with invalid counter", () => {
expect(() => new BrokerRegistration(keyPair, "hello", "test", 0, 0, "test", null, 0)).toThrow();
});
it("Construct a broker with invalid name", () => {
expect(() => new BrokerRegistration(keyPair, 1, 5, 0, 0, "test", null, 0)).toThrow();
});
it("Construct a broker with negative costPerMinute", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", -1, 0, "test", null, 0)).toThrow();
});
it("Construct a broker with invalid costPerMinute", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 1.5, 0, "test", null, 0)).toThrow();
});
it("Construct a broker with negative costPerKB", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, -1, "test", null, 0)).toThrow();
});
it("Construct a broker with invalid costPerKB", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, "hello", "test", null, 0)).toThrow();
});
it("Construct a broker with invalid broker", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, 5, null, 0)).toThrow();
});
it("Construct a broker with negative rewardAmount", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", null, -1)).toThrow();
});
it("Construct a broker with invalid rewardAmount", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", null, "0")).toThrow();
});
it("Construct a broker with extra metadata", () => {
new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
});
it("Construct a broker invalid subject in extra metadata", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: 0,
p: "and",
o: "something else"
}], 0)).toThrow();
});
it("Construct a broker reserved subject in extra metadata", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: SENSHAMART_URI_PREFIX + "something",
p: "and",
o: "something else"
}], 0)).toThrow();
});
it("Construct a broker with invalid predicate in extra metadata", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: {},
o: "something else"
}], 0)).toThrow();
});
it("Construct a broker with reserved predicate in extra metadata", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: SENSHAMART_URI_PREFIX + "and",
o: "something else"
}], 0)).toThrow();
});
it("Construct a broker with invalid object in extra metadata", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: []
}], 0)).toThrow();
});
it("Construct a broker with reserved object in extra metadata", () => {
expect(() => new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: SENSHAMART_URI_PREFIX + "something else"
}], 0)).toThrow();
});
it("Changing input fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.input = ChainUtil.genKeyPair();
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
it("Changing counter fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.counter++;
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
it("Changing rewardAmount fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.rewardAmount++;
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata name fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.metadata.name = "else";
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata costPerMinute fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.metadata.costPerMinute++;
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata costPerKB fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.metadata.costPerKB++;
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata endpoint fails verify", () => {
const changing = new BrokerRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(BrokerRegistration.verify(changing).result).toBe(true);
changing.metadata.endpoint += "a";
expect(BrokerRegistration.verify(changing).result).toBe(false);
});
});

View file

@ -1,4 +1,4 @@
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const Integration = require('./integration');
const integrationValidation = {
@ -60,6 +60,10 @@ class Compensation {
result: true,
};
}
static name() {
return "Compensation";
}
}
module.exports = Compensation;

View file

@ -1,11 +1,11 @@
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const SeedRandom = require('seedrandom');
const outputValidation = {
publicKey: ChainUtil.validateIsPublicKey,
sensor: ChainUtil.validateIsString,
sensorName: ChainUtil.validateIsString,
amount: ChainUtil.createValidateIsIntegerWithMin(1),
counter: ChainUtil.createValidateIsIntegerWithMin(1)
sensorHash: ChainUtil.validateIsString,
brokerHash: ChainUtil.validateIsString
};
function validateOutputs(t) {
@ -54,12 +54,12 @@ class Integration {
}
}
static createOutput(recipientPublicKey, sensorId, amount, counter) {
static createOutput(amount, sensorName, sensorRegistrationHash, brokerRegistrationHash) {
return {
publicKey: recipientPublicKey,
sensor: sensorId,
amount: amount,
counter: counter
sensorName: sensorName,
sensorHash: sensorRegistrationHash,
brokerHash: brokerRegistrationHash
};
}
@ -128,6 +128,10 @@ class Integration {
witnesses: witnesses
};
}
static name() {
return "Integration";
}
}
module.exports = Integration;

View file

@ -1,5 +1,5 @@
const Integration = require('./integration');
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
function createDummyIntegration(keyPair, witnesses) {
return new Integration(

View file

@ -1,4 +1,4 @@
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const outputValidation = {
publicKey: ChainUtil.validateIsPublicKey,
@ -79,6 +79,10 @@ class Payment {
result: true,
};
}
static name() {
return "Payment";
}
}
module.exports = Payment;

View file

@ -1,79 +0,0 @@
const Transaction = require('./transaction');
const Wallet = require('./index');
const { MINING_REWARD } = require('../constants');
describe('Transaction', () => {
let transaction, wallet, recipient, amount;
beforeEach(() => {
wallet = new Wallet();
amount = 50;
recipient = 'r3c1p13nt';
transaction = Transaction.newTransaction(wallet, recipient, amount);
});
it('outputs the `amount` subtracted from the wallet balance', () => {
expect(transaction.outputs.find(output => output.address === wallet.publicKey).amount)
.toEqual(wallet.balance - amount);
});
it('outputs the `amount` added to the recipient', () => {
expect(transaction.outputs.find(output => output.address === recipient).amount)
.toEqual(amount);
});
it('inputs the balance of the wallet', () => {
expect(transaction.input.amount).toEqual(wallet.balance);
});
it('validates a valid transaction', () => {
expect(Transaction.verifyTransaction(transaction)).toBe(true);
});
it('invalidates a corrupt transaction', () => {
transaction.outputs[0].amount = 50000;
expect(Transaction.verifyTransaction(transaction)).toBe(false);
});
describe('transacting with an amount that exceeds the balance', () => {
beforeEach(() => {
amount = 50000;
transaction = Transaction.newTransaction(wallet, recipient, amount);
});
it('does not create the transaction', () => {
expect(transaction).toEqual(undefined);
});
});
describe('and updating a transaction', () => {
let nextAmount, nextRecipient;
beforeEach(() => {
nextAmount = 20;
nextRecipient = 'n3xt-4ddr355';
transaction = transaction.update(wallet, nextRecipient, nextAmount);
});
it(`subtracts the next amount from the sender's output`, () => {
expect(transaction.outputs.find(output => output.address === wallet.publicKey).amount)
.toEqual(wallet.balance - amount - nextAmount);
});
it('outputs an amount for the next recipient', () => {
expect(transaction.outputs.find(output => output.address === nextRecipient).amount)
.toEqual(nextAmount);
});
});
describe('creating a reward transaction', () => {
beforeEach(() => {
transaction = Transaction.rewardTransaction(wallet, Wallet.blockchainWallet());
});
it(`reward the miner's wallet`, () => {
expect(transaction.outputs.find(output => output.address === wallet.publicKey).amount)
.toEqual(MINING_REWARD);
});
});
});

View file

@ -1,5 +1,4 @@
const Stream = require("stream");
const DataFactory = require('n3').DataFactory;
//class NamedNode {
// constructor(value) {
@ -294,32 +293,29 @@ function addQuadToMap(counter, map, key, quad, toPop) {
if (toPop.has(key)) {
popper = toPop.get(key);
} else {
popper = {
delete: false,
removing: []
};
popper = [];
toPop.set(key, popper);
}
if (map.has(key)) {
quadMap = map.get(key);
popper.removing.push(counter);
} else {
quadMap = new Map();
map.set(key, quadMap);
popper.delete = true;
}
popper.push(counter);
quadMap.set(counter, quad);
}
function popFromSource(list, map) {
for (const [key, popper] of list) {
if (popper.delete) {
const innerMap = map.get(key);
if (popper.length === innerMap.size) {
map.delete(key)
} else {
const keyMap = map.get(key);
for (const counter of popper.removing) {
keyMap.delete(counter);
for (const counter of popper) {
innerMap.delete(counter);
}
}
}
@ -342,12 +338,12 @@ class Source {
this.objects = new Map();
this.graphs = new Map();
this.all = [];
this.pop = [];
this.popping = [];
this.counter = 0;
}
startPush() {
this.pop.push({
this.popping.push({
subjects: new Map(),
predicates: new Map(),
objects: new Map(),
@ -357,7 +353,7 @@ class Source {
}
push(quad) {
const toPop = this.pop[this.pop.length - 1];
const toPop = this.popping[this.popping.length - 1];
addQuadToMap(this.counter, this.subjects, quad.subject.value, quad, toPop.subjects);
addQuadToMap(this.counter, this.predicates, quad.predicate.value, quad, toPop.predicates);
@ -369,11 +365,11 @@ class Source {
}
pop() {
if (this.pop.length === 0) {
if (this.popping.length === 0) {
throw new Error("Nothing to pop");
}
const toPop = this.pop.pop();
const toPop = this.popping.pop();
this.all.slice(0, -toPop.count);
@ -453,7 +449,7 @@ class Source {
cloneTermMap(this.graphs, returning.graphs);
this.all.forEach(item => returning.all.push(item));
this.pop.forEach(item => returning.pop.push(item));
this.popping.forEach(item => returning.popping.push(item));
returning.counter = this.counter;
return returning;
@ -461,7 +457,7 @@ class Source {
pushInto(parent) {
let on = 0;
for (const toPop of this.pop) {
for (const toPop of this.popping) {
parent.startPush();
for (const quad of this.all.slice(on, on + toPop.count)) {
parent.push(quad);

View file

@ -1,150 +1,95 @@
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const SENSHAMART_URI_PREFIX = require('../util/constants').SENSHAMART_URI_PREFIX;
const tripleValidator = {
s: ChainUtil.validateIsString,
p: ChainUtil.validateIsString,
o: ChainUtil.validateIsString
};
function validateTerm(t) {
const stringRes = ChainUtil.validateIsString(t);
function validateMetadata(t) {
let isSensor = [];
let costPerMinute = [];
let costPerKB = [];
let integrationBroker = [];
const validationRes = ChainUtil.validateArray(t, ChainUtil.createValidateObject(tripleValidator));
if (!validationRes.result) {
return validationRes;
if (!stringRes.result) {
return stringRes;
}
for (const triple of t) {
switch (triple.p) {
case "http://SSM/Cost_of_Using_IoT_Devices/Cost_Per_Minute": costPerMinute.push(triple); break;
case "http://SSM/Cost_of_Using_IoT_Devices/Cost_Per_Kbyte": costPerKB.push(triple); break;
case "http://www.w3.org/1999/02/22-rdf-syntax-ns#type":
if (triple.o === "http://www.w3.org/ns/sosa/Sensor") {
isSensor.push(triple.s);
}
break;
case "http://SSM/Integration/Broker": integrationBroker.push(triple); break;
}
}
if (isSensor.length === 0) {
if (t.startsWith(SENSHAMART_URI_PREFIX)) {
return {
result: false,
reason: "No sensor is defined"
};
} else if (isSensor.length > 1) {
return {
result: false,
reason: "Multiple sensors are defined"
};
}
const sensorName = isSensor[0];
if (costPerMinute.length === 0) {
return {
result: false,
reason: "No cost per minute was defined"
};
} else if (costPerMinute.length > 1) {
return {
result: false,
reason: "Multiple cost per minutes were defined"
}
}
const CostPerMinuteValue = Number.parseInt(costPerMinute[0].o);
if (CostPerMinuteValue === NaN) {
return {
result: false,
reason: "Couldn't parse cost per minute as an integer"
};
} else if (CostPerMinuteValue < 1) {
return {
result: false,
reason: "Cost per minute was negative"
}
} else if (costPerMinute[0].s != sensorName) {
return {
result: false,
reason: "Cost per minute object isn't the broker"
};
}
if (costPerKB.length === 0) {
return {
result: false,
reason: "No cost per KB was defined"
};
} else if (costPerKB.length > 1) {
return {
result: false,
reason: "Multiple cost per KB were defined"
}
}
const CostPerKBValue = Number.parseInt(costPerKB[0].o);
if (CostPerKBValue === NaN) {
return {
result: false,
reason: "Couldn't parse cost per KB as an integer"
};
} else if (CostPerKBValue < 1) {
return {
result: false,
reason: "Cost per KB was negative"
}
} else if (costPerKB[0].s != sensorName) {
return {
result: false,
reason: "Cost per KB object isn't the broker"
};
}
if (integrationBroker.length === 0) {
return {
result: false,
reason: "No integration broker was defined"
};
} else if (integrationBroker.length > 1) {
return {
result: false,
reason: "Multiple integration brokers were defined"
};
} else if (integrationBroker[0].s != sensorName) {
return {
result: false,
reason: "Integration broker subjsect isn't the sensor"
reason: "Starts with reserved prefix"
};
}
return {
result: true,
metadata: {
sensorName: sensorName,
costPerMinute: CostPerMinuteValue,
costPerKB: CostPerKBValue,
integrationBroker: integrationBroker[0].o
}
result: true
};
}
function validateLiteral(t) {
const termRes = validateTerm(t);
if (termRes.result) {
return termRes;
}
const numberRes = ChainUtil.validateIsNumber(t);
if (numberRes.result) {
return numberRes;
}
return {
result: false,
reason: "Wasn't a string or a number"
};
}
const nodeValidator = {
s: validateTerm,
p: validateTerm,
o: validateTerm
};
const literalValidator = {
s: validateTerm,
p: validateTerm,
o: validateLiteral
};
const metadataValidation = {
name: ChainUtil.validateIsString,
costPerMinute: ChainUtil.createValidateIsIntegerWithMin(0),
costPerKB: ChainUtil.createValidateIsIntegerWithMin(0),
integrationBroker: ChainUtil.validateIsString,
extraNodes: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(
ChainUtil.createValidateObject(
nodeValidator))),
extraLiterals: ChainUtil.createValidateOptional(
ChainUtil.createValidateArray(
ChainUtil.createValidateObject(
literalValidator)))
};
const baseValidation = {
input: ChainUtil.validateIsPublicKey,
counter: ChainUtil.createValidateIsIntegerWithMin(1),
rewardAmount: ChainUtil.createValidateIsIntegerWithMin(0),
metadata: validateMetadata,
metadata: ChainUtil.createValidateObject(metadataValidation),
signature: ChainUtil.validateIsSignature
};
class SensorRegistration {
constructor(senderKeyPair, counter, metadata, rewardAmount) {
constructor(senderKeyPair, counter, sensorName, costPerMinute, costPerKB, integrationBroker, nodeMetadata, literalMetadata, rewardAmount) {
this.input = senderKeyPair.getPublic().encode('hex');
this.counter = counter;
this.rewardAmount = rewardAmount;
this.metadata = metadata;
this.metadata = {
name: sensorName,
costPerMinute: costPerMinute,
costPerKB: costPerKB,
integrationBroker: integrationBroker,
};
if (typeof nodeMetadata !== undefined && nodeMetadata !== null) {
this.metadata.extraNodes = nodeMetadata;
}
if (typeof literalMetadata !== undefined && literalMetadata !== null) {
this.metadata.extraLiterals = literalMetadata;
}
this.signature = senderKeyPair.sign(SensorRegistration.hashToSign(this));
const verification = SensorRegistration.verify(this);
@ -153,6 +98,38 @@ class SensorRegistration {
}
}
static getSensorName(registration) {
return registration.metadata.name;
}
static getCostPerMinute(registration) {
return registration.metadata.costPerMinute;
}
static getCostPerKB(registration) {
return registration.metadata.costPerKB;
}
static getIntegrationBroker(registration) {
return registration.metadata.integrationBroker;
}
static getExtraNodeMetadata(registration) {
if ("extraNodes" in registration.metadata) {
return registration.metadata.extraNodes;
} else {
return [];
}
}
static getExtraLiteralMetadata(registration) {
if ("extraLiterals" in registration.metadata) {
return registration.metadata.extraLiterals;
} else {
return [];
}
}
static hashToSign(registration) {
return ChainUtil.hash([
registration.counter,
@ -163,8 +140,7 @@ class SensorRegistration {
static verify(registration) {
const validationResult = ChainUtil.validateObject(registration, baseValidation);
if (!validationResult.result) {
console.log(`Failed validation: ${validationResult.reason}`);
return false;
return validationResult;
}
const verifyRes = ChainUtil.verifySignature(
@ -180,8 +156,8 @@ class SensorRegistration {
};
}
static getExtInformation(registration) {
return validateMetadata(registration.metadata);
static name() {
return "SensorRegistration";
}
}

View file

@ -1,109 +1,205 @@
const Transaction = require('./transaction');
const Metadata = require('./metadata');
const Wallet = require('./index');
const { MINING_REWARD } = require('../constants');
const SensorRegistration = require('./sensor-registration');
const ChainUtil = require('../util/chain-util');
const SENSHAMART_URI_PREFIX = require('../util/constants').SENSHAMART_URI_PREFIX;
describe('Transaction & Metadata', () => {
let transaction, metadata, wallet, recipient, amount,
senderWallet,Name,Geo ,IP_URL , Topic_Token, Permission,
RequestDetail, OrgOwner, DepOwner,PrsnOwner, PaymentPerKbyte,
PaymentPerMinute, Protocol, MessageAttributes, Interval,
FurtherDetails, SSNmetadata;
describe('Sensor Registration', () => {
let keyPair;
beforeEach(() => {
wallet = new Wallet();
amount = 50;
recipient = 'r3c1p13nt';
senderWallet = new Wallet();
Name = 'IoT_Lab_Temp_Sensor'
Geo = [1.045,0.0135]
IP_URL = 'www.IoT-locationbar.com/sensors/temp'
Topic_Token = 'ACCESS_TOKEN'
Permission = 'Public'
RequestDetail = 'Null'
OrgOwner = 'Swinburne_University'
DepOwner = 'Computer_Science'
PrsnOwner = 'Anas_Dawod'
PaymentPerKbyte = 10
PaymentPerMinute = 5
Protocol = 'MQTT'
MessageAttributes = 'null'
Interval = 10
FurtherDetails = 'null'
SSNmetadata = 'null'
transaction = Transaction.newTransaction(wallet, recipient, amount);
metadata = Metadata.newMetadata(senderWallet,Name,Geo ,IP_URL , Topic_Token, Permission,
RequestDetail, OrgOwner, DepOwner,PrsnOwner, PaymentPerKbyte,
PaymentPerMinute, Protocol, MessageAttributes, Interval,
FurtherDetails, SSNmetadata)
keyPair = ChainUtil.genKeyPair();
});
it('outputs the `amount` subtracted from the wallet balance', () => {
expect(transaction.outputs.find(output => output.address === wallet.publicKey).amount)
.toEqual(wallet.balance - amount);
it("Construct a sensor", () => {
new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [], 0);
});
it('outputs the `amount` added to the recipient', () => {
expect(transaction.outputs.find(output => output.address === recipient).amount)
.toEqual(amount);
it("Construct a sensor with invalid counter", () => {
expect(() => new SensorRegistration(keyPair, "hello", "test", 0, 0, "test", null, 0)).toThrow();
});
it('inputs the balance of the wallet', () => {
expect(transaction.input.amount).toEqual(wallet.balance);
it("Construct a sensor with invalid name", () => {
expect(() => new SensorRegistration(keyPair, 1, 5, 0, 0, "test", null, 0)).toThrow();
});
it('validates a valid transaction', () => {
expect(Transaction.verifyTransaction(transaction)).toBe(true);
it("Construct a sensor with negative costPerMinute", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", -1, 0, "test", null, 0)).toThrow();
});
it('validates a valid metadata', () => {
expect(Metadata.verifyMetadata(metadata)).toBe(true);
it("Construct a sensor with invalid costPerMinute", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 1.5, 0, "test", null, 0)).toThrow();
});
it('invalidates a corrupt transaction', () => {
transaction.outputs[0].amount = 50000;
expect(Transaction.verifyTransaction(transaction)).toBe(false);
it("Construct a sensor with negative costPerKB", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, -1, "test", null, 0)).toThrow();
});
describe('transacting with an amount that exceeds the balance', () => {
beforeEach(() => {
amount = 50000;
transaction = Transaction.newTransaction(wallet, recipient, amount);
it("Construct a sensor with invalid costPerKB", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, "hello", "test", null, 0)).toThrow();
});
it('does not create the transaction', () => {
expect(transaction).toEqual(undefined);
});
it("Construct a sensor with invalid broker", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, 5, null, 0)).toThrow();
});
describe('and updating a transaction', () => {
let nextAmount, nextRecipient;
beforeEach(() => {
nextAmount = 20;
nextRecipient = 'n3xt-4ddr355';
transaction = transaction.update(wallet, nextRecipient, nextAmount);
it("Construct a sensor with negative rewardAmount", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", null, -1)).toThrow();
});
it(`subtracts the next amount from the sender's output`, () => {
expect(transaction.outputs.find(output => output.address === wallet.publicKey).amount)
.toEqual(wallet.balance - amount - nextAmount);
it("Construct a sensor with invalid rewardAmount", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", null, "0")).toThrow();
});
it('outputs an amount for the next recipient', () => {
expect(transaction.outputs.find(output => output.address === nextRecipient).amount)
.toEqual(nextAmount);
});
it("Construct a sensor with extra metadata", () => {
new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
});
describe('creating a reward transaction', () => {
beforeEach(() => {
transaction = Transaction.rewardTransaction(wallet, Wallet.blockchainWallet());
it("Construct a sensor invalid subject in extra metadata", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: 0,
p: "and",
o: "something else"
}], 0)).toThrow();
});
it(`reward the miner's wallet`, () => {
expect(transaction.outputs.find(output => output.address === wallet.publicKey).amount)
.toEqual(MINING_REWARD);
it("Construct a sensor reserved subject in extra metadata", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: SENSHAMART_URI_PREFIX + "something",
p: "and",
o: "something else"
}], 0)).toThrow();
});
it("Construct a sensor with invalid predicate in extra metadata", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: {},
o: "something else"
}], 0)).toThrow();
});
it("Construct a sensor with reserved predicate in extra metadata", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: SENSHAMART_URI_PREFIX + "and",
o: "something else"
}], 0)).toThrow();
});
it("Construct a sensor with invalid object in extra metadata", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: []
}], 0)).toThrow();
});
it("Construct a sensor with reserved object in extra metadata", () => {
expect(() => new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: SENSHAMART_URI_PREFIX + "something else"
}], 0)).toThrow();
});
it("Changing input fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.input = ChainUtil.genKeyPair();
expect(SensorRegistration.verify(changing).result).toBe(false);
});
it("Changing counter fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.counter++;
expect(SensorRegistration.verify(changing).result).toBe(false);
});
it("Changing rewardAmount fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.rewardAmount++;
expect(SensorRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata name fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.metadata.name = "else";
expect(SensorRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata costPerMinute fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.metadata.costPerMinute++;
expect(SensorRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata costPerKB fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.metadata.costPerKB++;
expect(SensorRegistration.verify(changing).result).toBe(false);
});
it("Changing metadata integrationBroker fails verify", () => {
const changing = new SensorRegistration(keyPair, 1, "test", 0, 0, "test", [{
s: "something",
p: "and",
o: "something else"
}], 0);
expect(SensorRegistration.verify(changing).result).toBe(true);
changing.metadata.integrationBroker += "a";
expect(SensorRegistration.verify(changing).result).toBe(false);
});
});

View file

@ -1,7 +1,8 @@
const Payment = require('./payment');
const Integration = require('./integration');
const SensorRegistration = require('./sensor-registration');
const BrokerRegistration = require('./broker-registration');
const Integration = require('./integration');
const Payment = require('./payment');
const Compensation = require('./compensation');
class Transaction {
constructor(transaction, type) {
@ -10,9 +11,13 @@ class Transaction {
this.type = type;
}
static mapId(type) {
return type.name();
}
static ALL_TYPES = [
SensorRegistration,
BrokerRegistration,
Integration,
Payment,
Compensation
];
};
module.exports = Transaction;

26
blockchain/uris.js Normal file
View file

@ -0,0 +1,26 @@
const PREFIX = require('../util/constants').SENSHAMART_URI_PREFIX;

//Well-known RDF predicate URIs used when the blockchain state is written into the triple store.
const PREDICATE = {
  IS_OWNED_BY: PREFIX + "IsOwnedBy",
  DEFINES: PREFIX + "Defines",
  HAS_COUNTER: PREFIX + "HasCounter",
  COSTS_PER_MINUTE: PREFIX + "CostsPerMinute",
  COSTS_PER_KB: PREFIX + "CostsPerKB",
  USES_BROKER: PREFIX + "UsesBroker",
  HAS_ENDPOINT: PREFIX + "HasEndpoint",
  CONTAINS_TRANSACTION: PREFIX + "ContainsTransaction",
  CONTAINS_SENSOR_REGISTRATION: PREFIX + "ContainsSensorRegistration",
  CONTAINS_BROKER_REGISTRATION: PREFIX + "ContainsBrokerRegistration",
  TYPE: "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", //standard rdf:type, intentionally not prefixed
  LAST_BLOCK: PREFIX + "LastBlock"
};

//Well-known RDF object URIs naming the kinds of entities stored.
const OBJECT = {
  SENSOR_REGISTRATION: PREFIX + "SensorRegistration",
  BROKER_REGISTRATION: PREFIX + "BrokerRegistration",
  TRANSACTION: PREFIX + "Transaction",
  WALLET: PREFIX + "Wallet",
  BLOCK: PREFIX + "Block",
};

module.exports = { PREDICATE, OBJECT };

View file

@ -1,16 +1,20 @@
//BROKER
const express = require('express');
const bodyParser = require('body-parser');
const P2pServer = require('../p2p-server');
const BlockchainProp = require('../network/blockchain-prop');
const Broker = require('./broker');
const Aedes = require('aedes');
const Config = require('../config');
const ChainUtil = require('../chain-util');
const Config = require('../util/config');
const ChainUtil = require('../util/chain-util');
const QueryEngine = require('@comunica/query-sparql-rdfjs').QueryEngine;
const Blockchain = require('../blockchain/blockchain');
const Block = require('../blockchain/block');
const Integration = require('../blockchain/integration');
const SensorRegistration = require('../blockchain/sensor-registration');
const BrokerRegistration = require('../blockchain/sensor-registration');
'use strict';
@ -18,10 +22,9 @@ const {
DEFAULT_PORT_BROKER_API,
DEFAULT_PORT_BROKER_CHAIN,
DEFAULT_PORT_BROKER_SENSOR_HANDSHAKE,
DEFAULT_PORT_BROKER_SENSOR_MQTT,
DEFAULT_PORT_BROKER_CLIENT_MQTT,
DEFAULT_PORT_BROKER_MQTT,
DEFAULT_PORT_MINER_CHAIN
} = require('../constants');
} = require('../util/constants');
const CONFIGS_STORAGE_LOCATION = "./settings.json";
@ -56,107 +59,221 @@ const sensorHandshakePort = config.get({
key: "broker-sensor-handshake-port",
default: DEFAULT_PORT_BROKER_SENSOR_HANDSHAKE
});
const sensorMQTTPort = config.get({
key: "broker-sensor-MQTT-port",
default: DEFAULT_PORT_BROKER_SENSOR_MQTT
});
const clientMQTTPort = config.get({
key: "broker-client-MQTT-port",
default: DEFAULT_PORT_BROKER_CLIENT_MQTT
const MQTTPort = config.get({
key: "broker-MQTT-port",
default: DEFAULT_PORT_BROKER_MQTT
});
const blockchain = Blockchain.loadFromDisk(blockchainLocation);
let sensorsServing = {};
//current time in fractional minutes since the unix epoch
//(divide by 1000 for ms, then 60 for seconds; note: NOT floored — the
//fractional part is kept so per-minute integration costs can be prorated)
function minutesNow() {
  return Date.now() / (1000 * 60);
}
const sensorMQTT = new Aedes({
/*
Sensor name -> {
Integration Hash -> {
sensor per kb
sensor per min
dataLastAt
coinsLeft
index
}
}
*/
const ourIntegrations = new Map();
const ourSensors = new Map();
const sensorOwnerHistory = [];
/**
 * Blockchain change listener: keeps the broker's bookkeeping in sync with the chain.
 *
 * Module state touched:
 * - ourSensors: sensor name -> SensorRegistration for sensors this broker serves
 * - ourIntegrations: sensor name -> (integration hash -> payment info)
 * - sensorOwnerHistory: per-block undo records so ownership can be rewound on reorgs
 *
 * @param newBlocks  blocks of the chain after the change
 * @param oldBlocks  blocks of the chain before the change
 * @param difference index of the first block that differs between the two chains
 */
function onBlockchainChange(newBlocks, oldBlocks, difference) {
  //rewind sensor ownership for every replaced block, newest first
  const popCount = oldBlocks.length - difference;
  for (let pops = 0; pops < popCount; pops++) {
    const changing = sensorOwnerHistory.pop();
    for (const sensorName of changing.removing) {
      ourSensors.delete(sensorName);
      console.log(`No longer brokering due to pop: ${sensorName}`);
    }
    for (const sensor of changing.adding) {
      ourSensors.set(SensorRegistration.getSensorName(sensor), sensor);
      console.log(`Now brokering due to pop: ${SensorRegistration.getSensorName(sensor)}`);
    }
  }

  //Integration hash -> Integration, for integrations found in the replaced blocks
  const removedIntegrations = new Map();
  for (let i = difference; i < oldBlocks.length; i++) {
    for (const integration of Block.getIntegrations(oldBlocks[i])) {
      removedIntegrations.set(Integration.hashToSign(integration), integration);
    }
  }

  //see what's added, then see what's removed
  //if it's been removed and added, we don't change anything, else we do the respective operation
  for (let i = difference; i < newBlocks.length; i++) {
    const newHistory = {
      adding: [],
      removing: []
    };
    for (const integration of Block.getIntegrations(newBlocks[i])) {
      const integrationHash = Integration.hashToSign(integration);
      //loop variable renamed from `i` — the original shadowed the block index above
      for (let outputI = 0; outputI < integration.outputs.length; outputI++) {
        const output = integration.outputs[outputI];
        if (!ourSensors.has(output.sensorName)) {
          continue; //not one of our sensors, nothing to do
        }
        const sensor = ourSensors.get(output.sensorName);
        if (!ourIntegrations.has(output.sensorName)) {
          ourIntegrations.set(output.sensorName, new Map());
        }
        const integrationMap = ourIntegrations.get(output.sensorName);
        if (integrationMap.has(integrationHash)) {
          //already known, so it survived the reorg; don't treat it as removed
          removedIntegrations.delete(integrationHash);
        } else {
          console.log(`Starting to integrate for integration: ${integrationHash}, sensor: ${output.sensorName}, perMin: ${SensorRegistration.getCostPerMinute(sensor)}, costPerKB: ${SensorRegistration.getCostPerKB(sensor)}`);
          integrationMap.set(integrationHash, {
            perKB: SensorRegistration.getCostPerKB(sensor),
            perMin: SensorRegistration.getCostPerMinute(sensor),
            dataLastAt: minutesNow(),
            coinsLeft: output.amount,
            index: outputI
          });
        }
      }
    }
    //playing with integrations done, now update which sensors we own
    for (const sensorRegistration of Block.getSensorRegistrations(newBlocks[i])) {
      const sensorName = SensorRegistration.getSensorName(sensorRegistration);
      if (ourSensors.has(sensorName)) {
        const existingSensor = ourSensors.get(sensorName);
        if (SensorRegistration.getIntegrationBroker(sensorRegistration) !== broker_name) { //the broker is now not us
          newHistory.adding.push(existingSensor);
          ourSensors.delete(sensorName);
          console.log(`No longer brokering due to push: ${sensorName}`);
        } else { //still us, but the registration was updated
          newHistory.adding.push(existingSensor);
          ourSensors.set(sensorName, sensorRegistration);
          console.log(`Updated brokering of ${sensorName}`);
        }
      } else if (SensorRegistration.getIntegrationBroker(sensorRegistration) === broker_name) {
        newHistory.removing.push(sensorName);
        ourSensors.set(sensorName, sensorRegistration);
        console.log(`Now brokering due to push: ${sensorName}`);
      }
    }
    sensorOwnerHistory.push(newHistory);
  }

  //anything left in removedIntegrations fell out of the chain entirely; stop serving it.
  //BUGFIX: this previously indexed ourSensors (SensorRegistrations have no .integrations
  //member) and called the non-existent Map method .remove(); prune ourIntegrations with
  //Map.delete instead, mirroring the cleanup done in onNewPacket.
  for (const [hash, integration] of removedIntegrations) {
    for (const output of integration.outputs) {
      const integrationMap = ourIntegrations.get(output.sensorName);
      if (typeof integrationMap === "undefined") {
        continue;
      }
      integrationMap.delete(hash);
      if (integrationMap.size === 0) {
        ourIntegrations.delete(output.sensorName);
      }
    }
  }
}
blockchain.addListener(onBlockchainChange);
onBlockchainChange(blockchain.blocks(), [], 0);
const mqtt = new Aedes({
id: broker_name
});
const sensorMQTTServer = require('net').createServer(sensorMQTT.handle);
const sensorMQTTSubscriptions = {};
const clientMQTT = new Aedes({
id: broker_name
});
const clientMQTTServer = require('net').createServer(clientMQTT.handle);
const MQTTServer = require('net').createServer(mqtt.handle);
function onNewPacket(sensor, data) {
//check to see if sensor has been paid for
clientMQTT.publish({
topic: sensor,
payload: data
});
}
console.log(`New packet from ${sensor} with size ${data.length}`);
function onChainServerRecv(data) {
const replaceResult = blockchain.replaceChain(Blockchain.deserialize(data));
if (!replaceResult.result) {
console.log(`Failed to replace chain: ${replaceResult.reason}`);
//failed to replace
const foundSensor = ourIntegrations.get(sensor);
if (typeof foundSensor === "undefined") {
return;
}
blockchain.saveToDisk(blockchainLocation);
const now = minutesNow();
sensorsServing = {};
const removing = [];
for (const sensorName in blockchain.sensors) {
const sensorData = blockchain.sensors[sensorName];
if (sensorData.integrationBroker === broker_name) {
sensorsServing[sensorName] = sensorData;
for (const [hash, info] of foundSensor) {
const timeDelta = now - info.dataLastAt;
const cost =
timeDelta * info.perMin
+ data.length / 1024 * info.perKB;
console.log(`out/${hash}/${info.index} = timeDelta: ${timeDelta}, cost: ${cost}`);
if (cost >= info.coinsLeft) {
//we're out of money, integration is over
console.log(`out of coins for ${hash}`);
removing.push(hash);
} else {
info.coinsLeft -= cost;
info.dataLastAt = now;
mqtt.publish({
topic: "out/" + hash + '/' + info.index,
payload: data
});
}
}
//UNSUBSCRIBE
for (const sensorName in sensorMQTTSubscriptions) {
if (!(sensorName in sensorsServing)) {
const deliverFunction = sensorMQTTSubscriptions[sensorName];
sensorMQTT.unsubscribe(sensorName, deliverFunction, () => { });
delete sensorMQTTSubscriptions[sensorName];
}
}
//SUBSCRIBE
for (const sensorName in sensorsServing) {
if (!(sensorName in sensorMQTTSubscriptions)) {
const deliverFunction = (packet, cb) => {
onNewPacket(packet.topic, packet.payload);
cb();
};
sensorMQTTSubscriptions[sensorName] = deliverFunction;
sensorMQTT.subscribe(sensorName, deliverFunction, () => { });
for (const hash of removing) {
foundSensor.delete(hash);
}
if (foundSensor.size === 0) {
ourIntegrations.delete(sensor);
}
}
//subscriptions are only accepted on out/ topics (where integrated sensor data is published)
mqtt.authorizeSubscribe = function (client, sub, callback) {
  if (sub.topic.startsWith("out/")) {
    console.log(`Subscription by ${client} to ${sub.topic}`);
    callback(null, sub);
    return;
  }
  console.log(`Failed subscribe to topic ${sub.topic} by ${client}`);
  callback(new Error("Can't sub to this topic"));
}
//publishes are only accepted on in/ topics; accepted payloads are forwarded into onNewPacket
mqtt.authorizePublish = function (client, packet, callback) {
  if (packet.topic.startsWith("in/")) {
    console.log(`Publish by ${client} to ${packet.topic} of size ${packet.payload.length}`);
    onNewPacket(packet.topic.substring(3), packet.payload); //strip the leading "in/"
    callback(null);
  } else {
    console.log(`Failed publish to topic ${packet.topic} by ${client}`);
    callback(new Error("Can't publish to this topic"));
  }
}
// Accept every client unconditionally — authentication is not implemented
// yet (flagged in the original as likely to change).
mqtt.authenticate = function (client, username, password, callback) {
  callback(null, true);
};
// Bridge a completed sensor-handshake message into the shared packet
// pipeline, treating the sensor name as the topic.
function onSensorHandshakeMsg(sensor, data) {
  onNewPacket(sensor, data);
}
// Start the blockchain propagation server.
// BUGFIX: two `const chainServer` declarations were present (the old
// P2pServer wiring alongside its BlockchainProp replacement), which is a
// redeclaration SyntaxError. Only the BlockchainProp version is kept —
// presumably the intended one, as it matches the new start() signature.
const chainServer = new BlockchainProp("Chain-server", blockchain);
chainServer.start(chainServerPort, null, chainServerPeers);
// Start the sensor handshake broker and the two MQTT front-ends.
broker.start(sensorHandshakePort, onSensorHandshakeMsg);
// BUGFIX: both the old `sensorMQTTServer.listen(sensorMQTTPort, ...)` call
// and its replacement were present, nested with unbalanced braces. Only one
// listener is kept — NOTE(review): confirm MQTTServer/MQTTPort are the live
// identifiers in this file revision.
MQTTServer.listen(MQTTPort, () => {
  console.log("Sensor MQTT started");
});
clientMQTTServer.listen(clientMQTTPort, () => {
  console.log("Client MQTT started");
});
const app = express();
app.use(bodyParser.json());
app.listen(apiPort, () => console.log(`Listening on port ${apiPort}`));
app.get('/sensors', (req, res) => {
res.json(sensorsServing);
app.get('/ourSensors', (req, res) => {
res.json(ourSensors);
});
app.get('/ChainServer/sockets', (req, res) => {

View file

@ -3,7 +3,7 @@ const Websocket = require('ws');
//const Aedes = require('aedes')(); /* aedes is a stream-based MQTT broker */
//const MQTTserver = require('net').createServer(aedes.handle);
const ChainUtil = require('../chain-util');
const ChainUtil = require('../util/chain-util');
const crypto = require('crypto');
const STATE_CLIENT_HELLOING = 0;

18
demo/CMakeLists.txt Normal file
View file

@ -0,0 +1,18 @@
cmake_minimum_required (VERSION 3.8)
SET(CMAKE_POLICY_DEFAULT_CMP0077 NEW)

project("senshamart demo" C CXX)

# Each demo component is optional: build it only when its directory is
# present in the source tree.
# BUGFIX: if(EXISTS ...) is only well-defined for explicit full paths, so
# the checks are anchored at CMAKE_CURRENT_SOURCE_DIR; the previous relative
# checks depended on the working directory CMake happened to run from.
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/sensor_client")
add_subdirectory("sensor_client")
endif()
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/milk_client")
add_subdirectory("milk_client")
endif()
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/camera_client")
add_subdirectory("camera_client")
endif()
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/demo_show_video")
add_subdirectory("demo_show_video")
endif()

27
demo/CMakeSettings.json Normal file
View file

@ -0,0 +1,27 @@
{
"configurations": [
{
"name": "x64-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": ""
},
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "msvc_x64_x64" ],
"variables": []
}
]
}

View file

@ -0,0 +1,78 @@
# CMakeList.txt : CMake project for brimbank, include source and define
# project specific logic here.
#
cmake_minimum_required (VERSION 3.8)
SET(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
project("senshamart" C CXX)
# OpenCV is mandatory for every target below.
find_package(OpenCV REQUIRED)
# The bundled Nerian vision-transfer library is only built on non-Windows
# hosts (Windows presumably uses a prebuilt copy — TODO confirm).
if(NOT WIN32)
add_subdirectory("dependencies/libvisiontransfer")
endif()
# Provides FFMPEG_INCLUDE_DIRS / FFMPEG_LIBRARIES used below.
include(FindFFmpeg.cmake)
# PCL is optional at configure time; the warning notes the runtime
# consequence for the point-cloud streaming executable.
find_package(PCL 1.3 COMPONENTS common io filters)
if(PCL_FOUND)
include_directories(${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})
else()
message(WARNING "nerian_stream_pc_aws will crash!")
endif()
# Add source to this project's executable.
# Static library with the camera capture/encode logic, consumed by the
# scratch test binary and the unified demo below.
# NOTE(review): senshamart_client is defined elsewhere in the build tree.
add_library(camera_demo_client STATIC
"src/camera.cpp")
target_include_directories(camera_demo_client PRIVATE
"private_include"
${FFMPEG_INCLUDE_DIRS})
target_include_directories(camera_demo_client PUBLIC
"public_include"
${OpenCV_INCLUDE_DIRS})
target_link_libraries(camera_demo_client PRIVATE
${FFMPEG_LIBRARIES}
${OpenCV_LIBRARIES}
senshamart_client)
target_compile_features(camera_demo_client PUBLIC
cxx_std_17)
# Scratch/testing executable for the client library.
add_executable(camera_demo_client_scratch
"scratch/scratch.cpp")
target_include_directories(camera_demo_client_scratch PRIVATE
${OpenCV_INCLUDE_DIRS}
${FFMPEG_INCLUDE_DIRS}
)
target_link_libraries(camera_demo_client_scratch PRIVATE
camera_demo_client
${OpenCV_LIBRARIES}
${FFMPEG_LIBRARIES}
)
# Full demo binary needs visiontransfer + PCL, hence non-Windows only
# (matching the libvisiontransfer subdirectory above).
if(NOT WIN32)
add_executable(camera_demo
"nerian_stream_unified/nerian_stream_unified.cpp")
target_include_directories(camera_demo PRIVATE
${OpenCV_INCLUDE_DIRS}
${FFMPEG_INCLUDE_DIRS}
)
target_link_libraries(camera_demo PRIVATE
camera_demo_client
${OpenCV_LIBRARIES}
${FFMPEG_LIBRARIES}
${LIB_SUFFIX} ${PCL_COMMON_LIBRARIES} ${PCL_IO_LIBRARIES} ${PCL_FILTERS_LIBRARIES} ${EXTRA_LIBS}
visiontransfer${LIB_SUFFIX})
endif()

View file

@ -0,0 +1,27 @@
{
"configurations": [
{
"name": "x64-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": ""
},
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "msvc_x64_x64" ],
"variables": []
}
]
}

View file

@ -0,0 +1,39 @@
# Rules for locating the FFmpeg libraries (libavformat, libavcodec,
# libavutil, libswscale). On completion defines:
#   FFMPEG_INCLUDE_DIRS, FFMPEG_LIBRARIES, FFMPEG_FOUND
if(WIN32)
# On Windows a packaged FFMPEG config/module is expected.
find_package(FFMPEG)
else()
# Elsewhere, use pkg-config output as search hints for each component.
find_package(PkgConfig REQUIRED)
pkg_check_modules(PC_FFMPEG REQUIRED libavformat libavcodec libavutil libswscale)
find_path(AVFORMAT_INCLUDE_DIR libavformat/avformat.h HINTS ${PC_FFMPEG_LIBAVFORMAT_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
find_library(AVFORMAT_LIBRARY NAMES libavformat avformat HINTS ${PC_FFMPEG_LIBAVFORMAT_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})
find_path(AVCODEC_INCLUDE_DIR libavcodec/avcodec.h HINTS ${PC_FFMPEG_LIBAVCODEC_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
find_library(AVCODEC_LIBRARY NAMES libavcodec avcodec HINTS ${PC_FFMPEG_LIBAVCODEC_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})
find_path(AVUTIL_INCLUDE_DIR libavutil/avutil.h HINTS ${PC_FFMPEG_LIBAVUTIL_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
find_library(AVUTIL_LIBRARY NAMES libavutil avutil HINTS ${PC_FFMPEG_LIBAVUTIL_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})
find_path(SWSCALE_INCLUDE_DIR libswscale/swscale.h HINTS ${PC_FFMPEG_LIBSWSCALE_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
# BUGFIX: the first candidate name was misspelled "libawscale", making that
# fallback useless; the correct library name is "libswscale".
find_library(SWSCALE_LIBRARY NAMES libswscale swscale HINTS ${PC_FFMPEG_LIBSWSCALE_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(AVFormat DEFAULT_MSG AVFORMAT_LIBRARY AVFORMAT_INCLUDE_DIR)
find_package_handle_standard_args(AVCodec DEFAULT_MSG AVCODEC_LIBRARY AVCODEC_INCLUDE_DIR)
find_package_handle_standard_args(AVUtil DEFAULT_MSG AVUTIL_LIBRARY AVUTIL_INCLUDE_DIR)
find_package_handle_standard_args(SWScale DEFAULT_MSG SWSCALE_LIBRARY SWSCALE_INCLUDE_DIR)
mark_as_advanced(AVFORMAT_INCLUDE_DIR AVFORMAT_LIBRARY)
mark_as_advanced(AVCODEC_INCLUDE_DIR AVCODEC_LIBRARY)
mark_as_advanced(AVUTIL_INCLUDE_DIR AVUTIL_LIBRARY)
mark_as_advanced(SWSCALE_INCLUDE_DIR SWSCALE_LIBRARY)
set(FFMPEG_INCLUDE_DIRS ${AVFORMAT_INCLUDE_DIR} ${AVCODEC_INCLUDE_DIR} ${AVUTIL_INCLUDE_DIR} ${SWSCALE_INCLUDE_DIR})
set(FFMPEG_LIBRARIES ${AVFORMAT_LIBRARY} ${AVCODEC_LIBRARY} ${AVUTIL_LIBRARY} ${SWSCALE_LIBRARY})
# Test the *_FOUND variables by name; if(${VAR} ...) raises a malformed-
# condition error should any of them ever be undefined.
if(AVFORMAT_FOUND AND AVCODEC_FOUND AND AVUTIL_FOUND AND SWSCALE_FOUND)
set(FFMPEG_FOUND TRUE)
else()
set(FFMPEG_FOUND FALSE)
endif()
endif()

View file

@ -0,0 +1,195 @@
cmake_minimum_required(VERSION 3.0.0)
if(COMMAND cmake_policy)
cmake_policy(SET CMP0003 NEW)
endif(COMMAND cmake_policy)
# BITS = native pointer width in bits (32/64); used to pick lib output dirs
# and the Windows Python install location below.
MATH(EXPR BITS ${CMAKE_SIZEOF_VOID_P}*8)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
# Search for PCL for example compilation
# NOTE(review): the cache description says "OpenCL" but the option gates the
# PCL example — looks like a typo in the description string.
set(DISABLE_PCL 0 CACHE BOOL "Disables OpenCL example")
if(NOT DISABLE_PCL)
message(STATUS "looking for PCL")
find_package(PCL 1.3 COMPONENTS common io filters)
if(PCL_FOUND)
include_directories(${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})
else()
message(WARNING "Not building PCL example!")
endif()
endif()
# Search for Open3D for example compilation
set(DISABLE_OPEN3D 0 CACHE BOOL "Disables Open3D example")
if(NOT DISABLE_OPEN3D)
# Search for Open3D
message(STATUS "looking for Open3D")
find_package(Open3D)
if(Open3D_FOUND)
include_directories(${Open3D_INCLUDE_DIRS})
# Check Open3D C++ ABI
# Open3D binaries may be built against the pre-C++11 libstdc++ string/list
# ABI; detect that from its exported compile definitions.
get_property(def TARGET Open3D::Open3D PROPERTY INTERFACE_COMPILE_DEFINITIONS)
if(def MATCHES "GLIBCXX_USE_CXX11_ABI=0")
set(OPEN3D_CXX11_ABI 0)
else()
set(OPEN3D_CXX11_ABI 1)
endif()
# Check system C++ ABI
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("\
#include <string>\n\
#if _GLIBCXX_USE_CXX11_ABI == 0\n\
#error\n\
#endif\n\
int main(int, char**) {return 0;}"
SYSTEM_CXX11_ABI)
# Check if ABIs match
set(OPEN3D_LIB_SUFFIX "")
if(NOT MSVC)
if(${SYSTEM_CXX11_ABI} AND (NOT ${OPEN3D_CXX11_ABI}))
message(WARNING
"Open3D was built with old C++ ABI (_GLIBCXX_USE_CXX11_ABI=0). "
"A separate version of libvisiontransfer will be built for linking "
"against Open3D. Using Open3D in combination with other libraries "
"that are built with the more recent C++ ABI will not be possible.")
set(BUILD_WITHOUT_CXX11_ABI 1)
set(OPEN3D_LIB_SUFFIX "-without-cxx11-abi${LIB_SUFFIX}")
endif()
endif()
else()
message(WARNING "Not building Open3D example!")
endif()
endif()
# Search for OpenCV for example compilation
set(DISABLE_OPENCV 0 CACHE BOOL "Disables OpenCV example")
if(NOT DISABLE_OPENCV)
message(STATUS "looking for OpenCV")
find_package(OpenCV)
if(OpenCV_FOUND)
include_directories(${OpenCV_INCLUDE_DIRS})
else()
message(WARNING "Not building OpenCV example!")
endif()
endif()
set(DISABLE_NATIVE 0 CACHE BOOL "Disables native architecture compile flag")
# Compiler flags: GCC/Clang-style toolchains (incl. MinGW) get -O3/-march
# and the newest supported C++ standard; MSVC builds only get a debug postfix.
if(NOT WIN32 OR MINGW)
include(CheckCXXCompilerFlag)
# Some useful flags
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -Wall")
CHECK_CXX_COMPILER_FLAG("-march=native" NATIVE_ARCH_SUPPORT)
if(NATIVE_ARCH_SUPPORT AND NOT DISABLE_NATIVE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
endif()
# Activate c++11 or newer support
CHECK_CXX_COMPILER_FLAG("-std=c++14" COMPILER_SUPPORTS_CXX14)
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
if(COMPILER_SUPPORTS_CXX14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
elseif(COMPILER_SUPPORTS_CXX11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
elseif(COMPILER_SUPPORTS_CXX0X)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
else()
message(WARNING "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()
else()
set(CMAKE_DEBUG_POSTFIX "-debug")
endif()
# Search for python3
set(DISABLE_PYTHON 0 CACHE BOOL "Disables python library")
if(NOT DISABLE_PYTHON)
message(STATUS "looking for python3")
if(WIN32 AND NOT MINGW)
# Make sure we use the Windows python, not a matching one from msys!
set(USERPROFILE $ENV{USERPROFILE})
if(${BITS} EQUAL 32)
file(GLOB Python3_EXECUTABLE
"${USERPROFILE}/AppData/Local/Programs/Python/Python3?-32/python.exe"
"${USERPROFILE}/AppData/Local/Programs/Python/Python3??-32/python.exe")
else()
file(GLOB Python3_EXECUTABLE
"${USERPROFILE}/AppData/Local/Programs/Python/Python3?/python.exe"
"${USERPROFILE}/AppData/Local/Programs/Python/Python3??/python.exe")
endif()
message(WARNING "Windows build - assuming Python 3 is \"${Python3_EXECUTABLE}\".")
else()
set(Python3_EXECUTABLE "python3")
# This is for CMake 3.12 and up; making sure we get python3
find_package (Python3 COMPONENTS Interpreter)
if(NOT Python3_FOUND)
find_package (Python3 COMPONENTS Interpreter HINTS "/mingw64")
endif()
if(NOT Python3_FOUND)
# We don't give up just yet
message(WARNING "Failed finding python3 with FindPython3. Assuming python3 is \"${Python3_EXECUTABLE}\"")
endif()
endif()
# Search for cython
# Probe by actually importing the Cython build machinery in the chosen
# interpreter; "OK" on stdout means usable.
message(STATUS "looking for Cython")
execute_process(COMMAND "${Python3_EXECUTABLE}" "-c"
"\
from distutils.core import setup\n\
from distutils.extension import Extension\n\
from Cython.Build import cythonize\n\
print('OK')\
"
OUTPUT_VARIABLE cython_output
OUTPUT_STRIP_TRAILING_WHITESPACE)
if("${cython_output}" STREQUAL "OK")
set(BUILD_CYTHON 1)
else()
message(WARNING "${cython_output}")
message(WARNING "Cython not found! Not building python library!")
endif()
# Search for python-wheel
message(STATUS "looking for Wheel")
execute_process(COMMAND "${Python3_EXECUTABLE}" "-c"
"\
import wheel\n\
print('OK')\
"
OUTPUT_VARIABLE wheel_output
OUTPUT_STRIP_TRAILING_WHITESPACE)
if("${wheel_output}" STREQUAL "OK")
set(BUILD_WHEEL 1)
else()
message(WARNING "${wheel_output}")
message(WARNING "Wheel not found! Not building python wheel packages!")
endif()
endif()
# CMAKE_SUBMODULE is set by parent projects embedding this build; only a
# standalone build overrides the library output path.
if(NOT CMAKE_SUBMODULE)
#set the default path for built libraries to the "lib" directory
if(NOT WIN32 OR MINGW)
set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib)
else()
set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib${BITS})
endif()
endif()
message(STATUS "CXX FLAGS: ${CMAKE_CXX_FLAGS}")
add_subdirectory(visiontransfer)
add_subdirectory(examples)
if(BUILD_CYTHON)
add_subdirectory(python)
endif()

View file

@ -0,0 +1,71 @@
Vision Transfer Library 9.0.2
-----------------------------
The given library provides functionality for receiving and transmitting
image pairs over a computer network. The intended use for this library
is to receive output data from Nerian's 3D camera systems. However, the
library also provides functionality for transmitting image data. It can
thus be used for emulating a camera system when performing systems
development, or for transmitting image data to Nerian's SceneScan
system when using network image input.
Images can be transferred with a bit depth of either 8 or 12 bits. When
receiving 12-bit images, the library inflates the images internally to
16 bits, in order to allow for more efficient processing. Monochrome
and RGB images are supported.
When receiving data from a camera system, the first image of an image
set is typically the rectified image of the left camera, with a bit
depth of 8 or 12 bits. The second image is typically a disparity map
with subpixel resolution, which is transmitted as a 12-bit image. Each
value in the disparity map has to be divided by 16 in order to receive
disparities at the correct scale.
There exist three possible ways for receiving and transmitting image
pairs:
* `visiontransfer::AsyncTransfer` allows for the asynchronous reception
or transmission of image pairs. This class creates one or more
threads that handle all network communication.
* `visiontransfer::ImageTransfer` opens up a network socket for sending
and receiving image pairs. This class is single-threaded and will
thus block when receiving or transmitting data.
* `visiontransfer::ImageProtocol` is the most low-level interface. This
class allows for the encoding and decoding of image pairs to / from
network messages. You will have to handle all network communication
yourself.
In order to discover connected devices on the network, the class
`visiontransfer::DeviceEnumeration` can be used, which scans for
available devices and returns a list of `visiontransfer::DeviceInfo`
objects. Such a `visiontransfer::DeviceInfo` object can be used for
instantiating `visiontransfer::ImageTransfer` or
`visiontransfer::AsyncTransfer`.
A separate network protocol is used for reading and writing device
parameters. This protocol is implemented by
`visiontransfer::DeviceParameters`. Any parameters that are changed
through this protocol will be reset if the device is rebooted or if the
user makes a parameter change through the web interface.
The library further includes the class `visiontransfer::Reconstruct3D`,
which can be used for transforming a received disparity map into a set
of 3D points.
Available Examples
------------------
| File name | Description |
|--------------------------------|--------------------------------------------------------------------------------|
| `asynctransfer_example.cpp` | Demonstration of asynchronous transfers with `visiontransfer::AsyncTransfer`. |
| `imagetransfer_example.cpp` | Demonstration of synchronous transfers with `visiontransfer::ImageTransfer`. |
| `opencv_example.cpp` | Shows how to convert an ImagePair to OpenCV images. |
| `parameter_example.cpp` | Shows how to read and write device parameters. |
| `pcl_example.cpp` | Shows how to convert a disparity map to a PCL point cloud |
| `server_example.cpp` | Shows how to create a server that acts like a SceneScan device. |
| `imu_data_channel_example.cpp` | Shows how to receive IMU data. |
[Changelog](CHANGELOG.md)

View file

@ -0,0 +1,70 @@
# Builds the Cython wrapper for libvisiontransfer: preprocesses the .in
# templates, compiles the extension, and packages egg/wheel distributions.
message(STATUS "Python build uses source directory ${CMAKE_CURRENT_SOURCE_DIR}")
set(LIBVISIONTRANSFER_SRCDIR "${CMAKE_CURRENT_SOURCE_DIR}/.." CACHE PATH "Base directory of libvisiontransfer source package")
set(LIBVISIONTRANSFER_LIBDIR "${LIBRARY_OUTPUT_PATH}" CACHE PATH "Base directory of built libvisiontransfer libraries")
set(LIBVISIONTRANSFER_EGGDIR "${LIBRARY_OUTPUT_PATH}/../python3-egg" CACHE PATH "Target directory for Python .egg packaging")
set(LIBVISIONTRANSFER_WHEELDIR "${LIBRARY_OUTPUT_PATH}/../python3-wheel" CACHE PATH "Target directory for Python .whl packaging")
if (WIN32 OR MINGW)
# Extra libs to link in cython step
set(LIBVISIONTRANSFER_EXTRA_LIBS "ws2_32,Iphlpapi")
else()
set(LIBVISIONTRANSFER_EXTRA_LIBS "")
endif()
# Stage the template sources and generator tools into the build tree so the
# generators can run with the build dir as their working directory.
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/visiontransfer_src/__init__.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/visiontransfer_src)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/visiontransfer_src/visiontransfer_cpp.pxd.in DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/visiontransfer_src)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/visiontransfer_src/visiontransfer.pyx.in DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/visiontransfer_src)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/tools/autogen_docstrings.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/tools)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/tools/autogen_parameters.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/tools)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/tools/generate_sources.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/tools)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/setup.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
# LIBNAME keeps a leading path separator: setup.py concatenates
# LIBVISIONTRANSFER_LIBDIR + LIBVISIONTRANSFER_LIB directly.
if(WIN32 AND NOT MINGW)
# CMAKE_BUILD_TYPE did not work here, but we only build for Release anyway
set(LIBNAME "/Release/visiontransfer-static${LIB_SUFFIX}.lib")
else()
# Linux and msys builds
set(LIBNAME "/libvisiontransfer-static${LIB_SUFFIX}.a")
endif()
# Target to call all required preprocessing and build steps for cython
# (The || cd . is a Unix-compatible way to clear Windows errorlevel if directory already exists)
add_custom_target(cython ALL
DEPENDS visiontransfer-static${LIB_SUFFIX}
COMMENT "Will run the Cython build target"
COMMAND mkdir visiontransfer || cd .
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${CMAKE_CURRENT_BINARY_DIR}" LIBVISIONTRANSFER_SRCDIR="${LIBVISIONTRANSFER_SRCDIR}"
LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} ${Python3_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/tools/autogen_docstrings.py
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${CMAKE_CURRENT_BINARY_DIR}" LIBVISIONTRANSFER_SRCDIR="${LIBVISIONTRANSFER_SRCDIR}"
LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} ${Python3_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/tools/autogen_parameters.py
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${CMAKE_CURRENT_BINARY_DIR}" LIBVISIONTRANSFER_SRCDIR="${LIBVISIONTRANSFER_SRCDIR}"
LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} ${Python3_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/tools/generate_sources.py
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${CMAKE_CURRENT_BINARY_DIR}" LIBVISIONTRANSFER_SRCDIR="${LIBVISIONTRANSFER_SRCDIR}"
LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} LIBVISIONTRANSFER_LIB=${LIBNAME}
LIBVISIONTRANSFER_EXTRA_LIBS=${LIBVISIONTRANSFER_EXTRA_LIBS}
${Python3_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py build_ext
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${CMAKE_CURRENT_BINARY_DIR}" LIBVISIONTRANSFER_SRCDIR="${LIBVISIONTRANSFER_SRCDIR}"
LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} LIBVISIONTRANSFER_LIB=${LIBNAME}
LIBVISIONTRANSFER_EXTRA_LIBS=${LIBVISIONTRANSFER_EXTRA_LIBS}
${Python3_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py bdist_egg --dist-dir "${LIBVISIONTRANSFER_EGGDIR}"
)
# Wheel is built separately as it might not be available on all systems
if(BUILD_WHEEL)
add_custom_target(wheel ALL
DEPENDS cython
COMMENT "Creates python wheel package"
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${CMAKE_CURRENT_BINARY_DIR}" LIBVISIONTRANSFER_SRCDIR="${LIBVISIONTRANSFER_SRCDIR}"
LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} LIBVISIONTRANSFER_LIB=${LIBNAME}
LIBVISIONTRANSFER_EXTRA_LIBS=${LIBVISIONTRANSFER_EXTRA_LIBS}
${Python3_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py bdist_wheel --dist-dir "${LIBVISIONTRANSFER_WHEELDIR}"
)
endif()
# Installs the python module via setup.py at `cmake --install` time.
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E env LIBVISIONTRANSFER_LIBDIR=${LIBVISIONTRANSFER_LIBDIR} \
LIBVISIONTRANSFER_EXTRA_LIBS=${LIBVISIONTRANSFER_EXTRA_LIBS} \
${Python3_EXECUTABLE} setup.py install WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})" )

View file

@ -0,0 +1,39 @@
Nerian visiontransfer library, Python 3 wrapper
===============================================
This library is a cython wrapper for the C++ library. The wrapper is
constructed from the current libvisiontransfer library during the
regular CMake build process.
If you wish to build it locally anyway, you have to adapt the 'incdir'
and 'libdir' settings in setup.py to point to the libvisiontransfer
header / library directory, respectively. The build steps then are:
export LIBVISIONTRANSFER_BASE=".."
python3 tools/autogen_docstrings.py
python3 tools/autogen_parameters.py
PYTHONPATH="." python3 tools/generate_sources.py
python3 setup.py build_ext --inplace
python3 setup.py install --user # or similar
Examples
--------
The examples/*.py files contain simple examples for using the library, e.g.:
python3 example_qt.py
Documentation
-------------
Documentation (partially auto-generated) is installed with the module:
pydoc3 visiontransfer
Development
-----------
Development should take place only on the visiontransfer_src/*.py.in
template files, as well as the preprocessors in tools/.
Any other files are autogenerated and will be overwritten by make.

View file

@ -0,0 +1,56 @@
#!/usr/bin/env python3
###############################################################################/
# Copyright (c) 2021 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
###############################################################################/
# setup.py for the visiontransfer Cython wrapper. Configuration comes from
# environment variables exported by the CMake build (see python/CMakeLists.txt).
from setuptools import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
import os
# default to CMake build based directory structure
srcbase = os.getenv("LIBVISIONTRANSFER_SRCDIR", "../..")
libbase = os.getenv("LIBVISIONTRANSFER_LIBDIR", "../..")
# NOTE(review): libname is concatenated onto libbase without a separator
# below, so it must carry a leading '/' — the default and the CMake-provided
# values both do.
libname = os.getenv("LIBVISIONTRANSFER_LIB", "/libvisiontransfer-static.a")
extra_libs_str = os.getenv("LIBVISIONTRANSFER_EXTRA_LIBS", "")
# Comma-separated extra link libraries (e.g. winsock on Windows); blanks dropped.
extra_libs = [s.strip() for s in extra_libs_str.split(',') if s.strip()!='']
print('libvisiontransfer src dir: '+srcbase)
print('libvisiontransfer lib dir: '+libbase)
print('libvisiontransfer lib name: '+libname)
incdir = srcbase
libdir = libbase
setup(
name="visiontransfer",
author="Nerian Vision GmbH",
author_email="service@nerian.com",
version="9.0.2",
packages=["visiontransfer"],
ext_modules=cythonize(
Extension(
name="visiontransfer",
sources=["visiontransfer/visiontransfer.pyx"],
include_dirs=[np.get_include(), incdir],
libraries=[*extra_libs],
# Link the prebuilt static library directly rather than via -l lookup.
extra_objects=[libbase + libname],
language="c++",
define_macros=[("VISIONTRANSFER_NO_DEPRECATION_WARNINGS", "1")], # silently wrap anything we want
#define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")], # for numpy; Cython>=3.0 only
)
, compiler_directives = { 'embedsignature': True })
)

View file

@ -0,0 +1,216 @@
#!/usr/bin/env python3
###############################################################################/
# Copyright (c) 2021 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
###############################################################################/
#
# This helper script auto-generates pydoc comments (Google-style syntax)
# from the Doxygen comments in the specified Nerian headers.
#
import sys
import os
import re
def print_error(what):
    """Emit a diagnostic message on stderr, terminated by a newline."""
    print(what, file=sys.stderr)
class RegexMatcher(object):
    """Convenience wrapper around re.search that caches the last match,
    so callers can test for a hit and then pull out groups in two steps."""

    def __init__(self):
        # Most recent re.Match (or None before any successful search).
        self.result = None

    def search(self, regex, where):
        """Search `where` for `regex`; cache the match and report success."""
        match = re.search(regex, where)
        self.result = match
        return match is not None

    def group(self, i=0):
        """Return capture group `i` (whole match by default) of the cached match."""
        return self.result.group(i)

    def groups(self):
        """Return all capture groups of the cached match."""
        return self.result.groups()
# NOTE(review): leading indentation of this class was lost in extraction;
# the lines below reproduce the flattened text as-is. Restore the original
# indentation before executing.
# Extracts Doxygen comments from C++ headers and converts them into
# Google-style Python docstrings keyed by qualified name ("Ns::Class::fn(args)").
class DocstringExtractor(object):
def __init__(self):
# Maps qualified C++ name -> generated docstring text.
self.docstrings = {}
pass
def snake_case(self, fnname):
'''Convert mixed case to Python methods' snake case'''
fnname_snake = ''
for c in fnname:
if c.isupper():
fnname_snake += '_' + c.lower()
else:
fnname_snake += c
# Some conventional exceptions :)
fnname_snake = fnname_snake.replace('r_o_i', 'roi')
return fnname_snake
# Converts one raw Doxygen comment into an indented, quoted docstring.
# \brief and \c are dropped; \return and \param become "Returns:"/"Args:"
# sections; other \tags become "Tag:" labels.
def beautified_docstring(self, comment, indent=8):
ds = ''
cs = [l.strip() for l in comment.split('\n')] # if l.strip()!='']
# remove leading blank lines
# NOTE(review): this filters a list of booleans, so reallines is a list of
# True values and cs[reallines[0]:] == cs[True:] == cs[1:] — the first line
# is dropped unconditionally instead of skipping to the first non-empty
# line (likely intended: indices via enumerate). Confirm and fix upstream.
reallines = list(filter(lambda x: x>0, [c!='' for c in cs]))
if len(reallines):
cs = cs[reallines[0]:]
#
printed_kwarg = False
extra_indent = 0
for i, c in enumerate(cs):
if c.strip() == '':
extra_indent = 0
next_is_param = False
cnew = ''
increase_extra_indent = 0
for j, w in enumerate(c.split()):
if w in ['\\brief', '\\c']:
pass
elif w in ['\\return']:
ds += '\n'
ds += ' '*indent + 'Returns:\n'
extra_indent = 4
increase_extra_indent = 4
elif w in ['\\param']:
if not printed_kwarg:
ds += ' '*indent + 'Args:\n'
extra_indent = 4
increase_extra_indent = 4
printed_kwarg = True
next_is_param = True
pass
elif w.startswith('\\'):
cnew += (' ' if len(cnew) else '') + w[1].upper()+w[2:]+': '
else:
cnew += (' ' if len(cnew) else '') + w
if next_is_param:
cnew += ':'
next_is_param = False
ds += ' '*indent + ' '*extra_indent + ("'''" if i==0 else "") + cnew + ("'''\n" if i==len(cs)-1 else "")
ds += '\n'
extra_indent += increase_extra_indent
return ds
# Line-oriented scanner over one header: tracks /** ... */ comments, the
# current namespace/class nesting (via brace counting), and function
# signatures (continued lines are stitched together through restl).
def generate(self, basedir, filename):
with open(basedir + '/' + filename, 'r') as f:
in_comment = False
comment = ''
names = []
currentname = ''
currentargs = ''
level = 0
restl =''
for rawl in [ll.strip() for ll in f.readlines()]:
l = restl + rawl
# NOTE(review): had_restl is computed but never used.
had_restl = len(restl) > 0
restl = ''
apply_comment = False
if in_comment:
end = l.find('*/')
thisline = (l if end<0 else l[:end]).lstrip('*').strip()
#if thisline != '':
comment += '\n' + thisline
if end >= 0:
in_comment = False
else:
start = l.find('/**')
if start >= 0:
currentname = '' # force finding new name
currentargs = ''
in_comment = True
comment = l[start+3:]
else:
rem = RegexMatcher()
if rem.search(r'(namespace|class|enum)([^:]*).*[{;]', l):
if comment != '':
cls = rem.group(2).strip().split()[-1]
currentname = cls
currentargs = ''
apply_comment = True
elif rem.search(r'[ \t]*(.*)\(', l): # match word and opening paren
if currentname == '':
cls = rem.group(1).strip().split()[-1]
currentname = cls
if rem.search(r'[ \t]*([^(]*)\((.*)\).*[{;]', l) and l.count('(') == l.count(')'): #: # match function
if l.count('(') == l.count(')'):
# reduce argument list (just names, no types or defaults)
args_just_names = [(a.split('=')[0].strip().split()[-1] if a.strip()!='' else '') for a in rem.group(2).split(',')]
currentargs = '(' + (', '.join(args_just_names)) + ')'
if comment != '':
apply_comment = True
else: # match partial fn or something like it
restl = l # save line for next iteration
continue # and proceed to next line
else:
pass
if apply_comment:
ns = names + [currentname+currentargs]
ns = [n for n in ns if n!='']
name = '::'.join(ns)
if name in self.docstrings and len(ns)>1: # warn, but not for the namespace doc
print_error('Note: not overwriting previous docstring for '+name)
else:
self.docstrings[name] = self.beautified_docstring(comment, indent=8)
comment = ''
for j in range(l.count('{')):
level += 1
names.append(currentname+currentargs)
currentname = ''
currentargs = ''
for j in range(l.count('}')):
level -= 1
names = names[:-1]
currentname = ''
currentargs = ''
# Writes the collected docstrings as a generated Python module; writes to
# `filename` unless an already-open file object `fobj` is supplied.
def store_docstrings_to_file(self, filename='', fobj=None):
f = open(filename, 'w') if fobj is None else fobj
f.write('''
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !! CAUTION !!
# !! !!
# !! This file is autogenerated from the libvisiontransfer headers !!
# !! using autogen_docstrings.py - manual changes are not permanent !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!''' + '\n\n')
f.write('_NERIAN_COMPILED_DOCSTRINGS = {\n')
for name, comment in self.docstrings.items():
f.write(" '"+ name + "': \\\n")
f.write(comment.rstrip('\n') + ',\n')
f.write('}\n')
f.write("\n# Also add parameter-less versions for convenience (duplicates overwritten)\n")
f.write("for __k in list(_NERIAN_COMPILED_DOCSTRINGS):\n")
f.write(" if __k.count('('):\n")
f.write(" _NERIAN_COMPILED_DOCSTRINGS[__k.split('(')[0]] = _NERIAN_COMPILED_DOCSTRINGS[__k]\n\n")
# NOTE(review): this condition looks inverted — it closes the CALLER's
# fobj and leaks the file opened here when fobj is None. Expected:
# close only if this method opened the file (fobj is None). Confirm.
if fobj is not None:
f.close()
# NOTE(review): indentation of this block was lost in extraction; restore
# before executing.
# Entry point: scan the listed libvisiontransfer headers (rooted at
# LIBVISIONTRANSFER_SRCDIR) and emit the autogenerated docstring module.
if __name__=='__main__':
basedir = os.getenv("LIBVISIONTRANSFER_SRCDIR", '../..')
if os.path.isdir(basedir):
d = DocstringExtractor()
for filename in [
'visiontransfer/deviceparameters.h',
'visiontransfer/imageset.h',
'visiontransfer/imageprotocol.h',
'visiontransfer/imagetransfer.h',
'visiontransfer/asynctransfer.h',
'visiontransfer/deviceenumeration.h',
'visiontransfer/deviceinfo.h',
'visiontransfer/sensordata.h',
'visiontransfer/datachannelservice.h',
'visiontransfer/reconstruct3d.h',
]:
d.generate(basedir, filename)
d.store_docstrings_to_file('visiontransfer_src/visiontransfer_docstrings_autogen.py')
else:
print("Could not open library base dir, please set a correct LIBVISIONTRANSFER_SRCDIR")

View file

@ -0,0 +1,183 @@
#!/usr/bin/env python3
###############################################################################/
# Copyright (c) 2021 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
###############################################################################/
#
# This helper script auto-generates adapters for all current
# Nerian stereo device parameters directly from the C++ header file.
#
import pathlib
import sys
import os
class Generator(object):
    # Generates two Cython sources from visiontransfer/deviceparameters.h:
    #   - a .pxd file with extern C++ declarations for DeviceParameters
    #   - a .pyx.in template wrapping every getter/setter as a snake_case
    #     Python method (docstrings are inserted later by substituting the
    #     _SUBSTITUTE_DOCSTRING_FOR_(...) marker lines).
    def __init__(self):
        # Fixed .pxd preamble; one extern declaration per method is appended
        # by add_pxd().
        self.pxdcode = \
'''
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!                             CAUTION                             !!
# !!                                                                 !!
# !! This file is autogenerated from the libvisiontransfer headers   !!
# !! using autogen.py - manual changes are not permanent!            !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

cdef extern from "visiontransfer/deviceparameters.h" namespace "visiontransfer":
    cdef cppclass DeviceParameters:
        DeviceParameters(const DeviceInfo &) except +'''.split('\n')
        # Fixed .pyx.in preamble; wrapper methods are appended by add_pyx().
        self.pyxcode = \
'''# distutils: language=c++
# cython: language_level=3

# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!                             CAUTION                             !!
# !!                                                                 !!
# !! This file is autogenerated from the libvisiontransfer headers   !!
# !! using autogen.py - manual changes are not permanent!            !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool
from cython cimport view

cdef class DeviceParameters:
'''.split('\n')
        # Constructor/destructor boilerplate, spliced into pyxcode right
        # after the class docstring marker (see generate()).
        self.pyxcode2 = \
'''
    cdef cpp.DeviceParameters* c_obj

    def __cinit__(self, DeviceInfo device_info):
        self.c_obj = new cpp.DeviceParameters(device_info.c_obj)

    def __dealloc__(self):
        del self.c_obj
'''.split('\n')

    def add_pxd(self, ret, fnname, argstr):
        # Append one extern method declaration to the .pxd output.
        args = [p.strip().split() for p in argstr.split(',')]
        # remove default arguments in pxd (present in pyx)
        for a in args:
            if len(a)>1:
                a[1] = a[1].split('=')[0]
        self.pxdcode.append(' '*8 + ret + ' ' + fnname + ' ('+(', '.join((a[0]+' '+a[1]) for a in args if len(a)>1))+') except +')

    def add_pyx(self, ret, fnname, argstr, comment):
        # Append a Python wrapper method for one C++ getter/setter.
        # Generate function name reference also used by doc extractor
        args_just_names = [(a.split('=')[0].strip().split()[-1] if a.strip()!='' else '') for a in argstr.split(',')]
        currentname = 'visiontransfer::DeviceParameters::' + fnname + '(' + (', '.join(args_just_names)) + ')'
        fnname_snake = self.snake_case(fnname)
        args = [p.strip().split() for p in argstr.split(',')]
        # Qualify non-primitive argument types with the cpp. module prefix.
        for i in range(len(args)):
            if len(args[i])>0:
                if args[i][0] in ['int', 'float', 'double', 'bool', 'int&', 'float&', 'double&', 'bool&']:
                    pass
                else:
                    args[i][0] = "cpp." + str(args[i][0])
        if fnname.startswith('set'):
            # Setters: pass arguments straight through (defaults preserved).
            argstr = ', '.join(' '.join(a) for a in args if len(a)>0)
            self.pyxcode.append(' '*4 + 'def '+ fnname_snake + '(self' + (', ' if len(argstr) else '') + argstr + '):')
            self.pyxcode.append(' '*8 + '_SUBSTITUTE_DOCSTRING_FOR_("' + currentname + '")')
            self.pyxcode.append(' '*8 + 'self.c_obj.'+ fnname + '(' + ', '.join(a[1].split('=')[0] for a in args if len(a)>1) + ')')
            self.pyxcode.append(' '*0) # extra newline to visually separate blocks
            pass
        else:
            # Getters: C++ reference out-parameters become Python return tuples.
            argstr = '' #', '.join(' '.join(a) for a in args if len(a)>0)
            newargstr_defaults = ', '.join(a[1] for a in args if len(a)>0)
            newargstr_nodefaults = ', '.join(a[1].split('=')[0] for a in args if len(a)>0)
            if all(' '.join(a).find('&')<0 for a in args): #len(args)==0 or len(args[0])==0:
                # No reference args: plain return value (wrap non-primitive types).
                if ret in ['int', 'float', 'double', 'bool', 'int&', 'float&', 'double&', 'bool&']:
                    ret = ''
                    ret_post = ''
                else:
                    ret += '('
                    ret_post = ')'
                self.pyxcode.append(' '*4 + 'def '+ fnname_snake + '(self' + (', ' if len(newargstr_defaults) else '') + newargstr_defaults + '):')
                self.pyxcode.append(' '*8 + '_SUBSTITUTE_DOCSTRING_FOR_("' + currentname + '")')
                self.pyxcode.append(' '*8 + 'return '+ret+'self.c_obj.'+ fnname + '(' + newargstr_nodefaults + ')' + ret_post)
            else:
                # Reference args: declare local cdef temporaries, call, return them.
                self.pyxcode.append(' '*4 + 'def '+ fnname_snake + '(self' + (', ' if len(argstr) else '') + argstr + '):')
                self.pyxcode.append(' '*8 + '_SUBSTITUTE_DOCSTRING_FOR_("' + currentname + '")')
                for a in args:
                    rawtype = a[0].replace('&', '')
                    var = a[1] if a[1].find('=')>0 else (a[1]+' = 0')
                    self.pyxcode.append(' '*8 + 'cdef '+rawtype+' '+var)
                self.pyxcode.append(' '*8 + 'self.c_obj.'+ fnname + '(' + newargstr_nodefaults + ')')
                self.pyxcode.append(' '*8 + 'return '+newargstr_nodefaults)
            self.pyxcode.append(' '*0) # extra newline to visually separate blocks

    def snake_case(self, fnname):
        '''Convert mixed case to Python methods' snake case'''
        fnname_snake = ''
        for c in fnname:
            if c.isupper():
                fnname_snake += '_' + c.lower()
            else:
                fnname_snake += c
        # Some conventional exceptions :)
        fnname_snake = fnname_snake.replace('r_o_i', 'roi')
        return fnname_snake

    def generate(self, basedir):
        # Parse deviceparameters.h line by line, tracking /** ... */ comment
        # state and brace nesting depth to locate the class and its methods.
        with open(basedir + '/visiontransfer/deviceparameters.h', 'r') as f:
            in_comment = False
            comment = ''
            level = 0
            for l in [ll.strip() for ll in f.readlines()]:
                if in_comment:
                    # Inside a doxygen block: accumulate stripped comment text.
                    end = l.find('*/')
                    thisline = (l if end<0 else l[:end]).lstrip('*').strip()
                    if thisline != '':
                        comment += '\n' + thisline
                    if end >= 0:
                        in_comment = False
                else:
                    start = l.find('/**')
                    if start >= 0:
                        in_comment = True
                        comment = l[start+3:]
                    else:
                        if level==1 and l.find(' DeviceParameters {') >= 0:
                            # insert class docstring
                            self.pyxcode.append(' '*4 + '_SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceParameters")')
                            self.pyxcode.extend(self.pyxcode2)
                            self.pyxcode2 = []
                            comment = ''
                        elif level==2 and l.find('(') >= 0 and l.find('{') > 0 and (l.find('get') > 0 or l.find('set') > 0):
                            # Inline getter/setter at class scope: emit wrappers.
                            ret = l.split()[0]
                            fnname = l.split()[1].split('(')[0]
                            args = l.split('(')[1].split(')')[0]
                            self.add_pxd(ret, fnname, args)
                            self.add_pyx(ret, fnname, args, comment)
                            comment = ''
                        else:
                            pass
                    # Track brace nesting to know when we are at class scope.
                    level += l.count('{')
                    level -= l.count('}')
if __name__ == '__main__':
    # Entry point: generate the parameter-access .pxd and .pyx.in sources.
    src_root = os.getenv("LIBVISIONTRANSFER_SRCDIR", '../..')
    if not os.path.isdir(src_root):
        print("Could not open library base dir, please set a correct LIBVISIONTRANSFER_SRCDIR")
    else:
        gen = Generator()
        gen.generate(src_root)
        pathlib.Path("visiontransfer").mkdir(parents=True, exist_ok=True)
        outputs = (
            ('visiontransfer/visiontransfer_parameters_cpp_autogen.pxd', gen.pxdcode),
            ('visiontransfer/visiontransfer_parameters_autogen.pyx.in', gen.pyxcode),
        )
        for path, code_lines in outputs:
            with open(path, 'w') as f:
                f.write('\n'.join(code_lines))

View file

@ -0,0 +1,81 @@
#!/usr/bin/env python3
###############################################################################/
# Copyright (c) 2021 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
###############################################################################/
#
# This helper script constructs commented Cython .pxd/.pyx source files
# from their .in templates, utilizing the docstrings generated earlier.
#
import re
import sys

from visiontransfer_src.visiontransfer_docstrings_autogen import _NERIAN_COMPILED_DOCSTRINGS
def get_docstring(what):
    '''
    Look up the pre-compiled C++ docstring for a fully qualified name.

    Args:
        what: Qualified name such as
            'visiontransfer::DeviceParameters::getOperationMode()', or None
            when the template macro carried no docstring key at all.

    Returns:
        The docstring text; a placeholder string when the key is missing
        from the compiled table or was never linked.
    '''
    if what is None:
        # Template bug: a substitution site without a docstring key. Warn on
        # stderr (fix: 'sys' is now imported; message is newline-terminated)
        # and emit a visible placeholder into the generated source.
        sys.stderr.write('IMPLEMENT_ME: missing docstring link\n')
        doc = 'IMPLEMENT_ME: missing docstring link'
    elif what in _NERIAN_COMPILED_DOCSTRINGS:
        doc = _NERIAN_COMPILED_DOCSTRINGS[what]
    else:
        # Known site but no doxygen text was extracted for it.
        doc = '(No extra documentation for ' + what + ')'
    return doc
def process_infile_to_outfile(infilename, outfilename):
    # Copy a Cython template (.in) to its final location, prepending a
    # do-not-edit banner and expanding every _SUBSTITUTE_DOCSTRING_FOR_(...)
    # macro line into an actual triple-quoted docstring.
    with open(infilename, 'r') as infile:
        with open(outfilename, 'w') as outfile:
            outfile.write( \
'''# distutils: language=c++
# cython: language_level=3

########################################
## Autogenerated file. Do not change! ##
##  Work on its .in template instead  ##
## (found inside visiontransfer_src). ##
########################################

''')
            # looking for docstring substitution sites, the C++ docs are translated
            # to docstring-like docs, and optionally prepended by a note. The same
            # indentation that the macro has is used for the entire docstring.
            # Syntax: _SUBSTITUTE_DOCSTRING_FOR_("CppNamespace::CppClassOrFn"[, "Python note"])
            for line in infile.readlines():
                if line.find('_SUBSTITUTE_DOCSTRING_FOR_(') >= 0:
                    # toks[1] is the C++ qualified name, toks[3] (optional)
                    # an extra Python-only note to prepend.
                    toks = line.split('"')
                    what = toks[1]
                    notelines = [] if len(toks)<4 else ([''] + list(toks[3].split('\n')) + ['']) # extra Python-only note
                    # Reuse the macro line's leading whitespace as the indent
                    # for the opening docstring quotes.
                    m = re.match(r'([ \t]*)', line)
                    whitespace = m.group(1) if m else ''
                    whitespace_len = len(whitespace) # common indent (NOTE(review): currently unused)
                    clines = get_docstring(what).split('\n')
                    alllines = notelines + clines
                    # First line gets the indent and opening quotes; the last
                    # line gets the closing quotes (may be the same line).
                    for i, cl in enumerate(alllines):
                        if i==0:
                            printline = whitespace + ("'''" if i==0 else '') + cl + ("'''" if i==len(alllines)-1 else '')
                        else:
                            printline = cl + ("'''" if i==len(alllines)-1 else '')
                        outfile.write(printline + '\n')
                else:
                    # Ordinary template line: copy through verbatim.
                    outfile.write(line)
if __name__ == '__main__':
    # Expand each template into its final, importable Cython source.
    jobs = (
        ('visiontransfer_src/visiontransfer.pyx.in', 'visiontransfer/visiontransfer.pyx'),
        ('visiontransfer_src/visiontransfer_cpp.pxd.in', 'visiontransfer/visiontransfer_cpp.pxd'),
        ('visiontransfer/visiontransfer_parameters_autogen.pyx.in', 'visiontransfer/visiontransfer_parameters_autogen.pyx'),
    )
    for template, target in jobs:
        process_infile_to_outfile(template, target)

View file

@ -0,0 +1,876 @@
# distutils: language=c++
# cython: embedsignature=True, language_level=3
###############################################################################/
# Copyright (c) 2021 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
###############################################################################/
'''
Python 3 wrapper for libvisiontransfer by Nerian Vision
This module is a wrapper for the libvisiontransfer library,
used to control and acquire data from Nerian's line of stereo
vision devices.
This documentation is largely autogenerated from the
C++ library doxygen annotations:
Please note that in some instances, the actual functions have been
adapted to be more Pythonic from their C++-specific calling conventions.
In particular, the auto-generated documentation of parameter getter
functions may indicate a number of arguments (C++ reference arguments),
but they actually directly return tuples in this Python library.
Refer to their Cython signature line (first line of their docstring)
to see the true arguments you can use; the rest of the arguments in
the C++ argument list is instead returned as a result tuple.
=============================================================================
Copyright (c) 2021 Nerian Vision GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
'''
cimport visiontransfer_cpp as cpp
# Autogenerated parameter access in extra file
include "visiontransfer/visiontransfer_parameters_autogen.pyx"
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool
from cython cimport view
cimport numpy as np
import numpy as np
np.import_array()
import enum
import sys
import time
class AutoMode(enum.IntEnum):
    # Mirrors visiontransfer::DeviceParameters::AutoMode; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceParameters::AutoMode")
    AUTO_EXPOSURE_AND_GAIN = 0
    AUTO_EXPOSURE_MANUAL_GAIN = 1
    MANUAL_EXPOSURE_AUTO_GAIN = 2
    MANUAL_EXPOSURE_MANUAL_GAIN = 3
class DeviceModel(enum.IntEnum):
    # Mirrors visiontransfer::DeviceParameters::DeviceModel; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceParameters::DeviceModel")
    SCENESCAN = 0
    SCENESCAN_PRO = 1
class NetworkProtocol(enum.IntEnum):
    # Mirrors visiontransfer::DeviceInfo::NetworkProtocol; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::NetworkProtocol")
    PROTOCOL_TCP = 0
    PROTOCOL_UDP = 1
class ProtocolType(enum.IntEnum):
    # Mirrors visiontransfer::ImageProtocol::ProtocolType; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageProtocol::ProtocolType")
    PROTOCOL_TCP = 0
    PROTOCOL_UDP = 1
class ImageFormat(enum.IntEnum):
    # Mirrors visiontransfer::ImageSet::ImageFormat; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::ImageFormat")
    FORMAT_8_BIT_MONO = 0
    FORMAT_8_BIT_RGB = 1
    FORMAT_12_BIT_MONO = 2
class ImageType(enum.IntEnum):
    # Mirrors visiontransfer::ImageSet::ImageType; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::ImageType")
    IMAGE_UNDEFINED = 0
    IMAGE_LEFT = 1
    IMAGE_DISPARITY = 2
    IMAGE_RIGHT = 3
class OperationMode(enum.IntEnum):
    # Mirrors visiontransfer::DeviceParameters::OperationMode; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceParameters::OperationMode")
    PASS_THROUGH = 0
    RECTIFY = 1
    STEREO_MATCHING = 2
class TargetFrame(enum.IntEnum):
    # Mirrors visiontransfer::DeviceParameters::TargetFrame; values must match C++.
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceParameters::TargetFrame")
    LEFT_FRAME = 0
    RIGHT_FRAME = 1
    BOTH_FRAMES = 2
cdef class DeviceEnumeration:
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceEnumeration")
    # Stack-allocated C++ enumeration object; lifetime tied to this wrapper.
    cdef cpp.DeviceEnumeration c_obj

    def discover_devices(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceEnumeration::discoverDevices")
        # Wrap each C++ DeviceInfo result in a Python-level DeviceInfo.
        device_infos = []
        cdef vector[cpp.DeviceInfo] devices = self.c_obj.discoverDevices()
        for device in devices:
            di = DeviceInfo()
            di.c_obj = device
            device_infos.append(di)
        return device_infos
cdef class DeviceInfo:
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo")
    # Value-copied C++ DeviceInfo; filled in by DeviceEnumeration.
    cdef cpp.DeviceInfo c_obj

    def get_ip_address(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::getIpAddress")
        cdef string s = self.c_obj.getIpAddress()
        return s.decode("utf-8")

    def get_network_protocol(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::getNetworkProtocol")
        # Convert the raw C++ enum value to the Python NetworkProtocol enum.
        return NetworkProtocol(self.c_obj.getNetworkProtocol())

    def get_firmware_version(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::getFirmwareVersion")
        cdef string s = self.c_obj.getFirmwareVersion()
        return s.decode("utf-8")

    def get_model(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::getModel")
        return DeviceModel(self.c_obj.getModel())

    def get_status(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::getStatus")
        # Copy the status into a Python-level DeviceStatus wrapper.
        ds = DeviceStatus()
        ds.c_obj = self.c_obj.getStatus()
        return ds

    def is_compatible(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceInfo::isCompatible")
        return self.c_obj.isCompatible()

    def __str__(self):
        cdef string s = self.c_obj.toString()
        return s.decode("utf-8")
    __repr__ = __str__
cdef class DeviceStatus:
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceStatus")
    # Value-copied C++ DeviceStatus; filled in by DeviceInfo.get_status().
    cdef cpp.DeviceStatus c_obj

    def is_valid(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceStatus::isValid")
        return self.c_obj.isValid()

    def get_last_fps(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceStatus::getLastFps")
        return self.c_obj.getLastFps()

    def get_jumbo_mtu(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceStatus::getJumboMtu")
        return self.c_obj.getJumboMtu()

    def get_jumbo_frames_enabled(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceStatus::getJumboFramesEnabled")
        # return as bool here (still uint in API)
        return self.c_obj.getJumboFramesEnabled() != 0

    def get_current_capture_source(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::DeviceStatus::getCurrentCaptureSource")
        cdef string s = self.c_obj.getCurrentCaptureSource()
        return s.decode("utf-8")
def create_image_set_from_reduced_data(width, height, nimg, indices, strides, formats, data, qmat, seqnum, subpix, expos, disprange, times, lastsync):
    '''Only for internal use (shim for unpickling / copy).'''
    # Rebuild a fully Python-managed ImageSet from plain serialized fields.
    result = ImageSet()
    result.set_width(width)
    result.set_height(height)
    result.set_number_of_images(nimg)
    image_types = (ImageType.IMAGE_LEFT, ImageType.IMAGE_DISPARITY, ImageType.IMAGE_RIGHT)
    for image_type, index in zip(image_types, indices):
        result.set_index_of(image_type, index)
    for channel in range(nimg):
        result.set_row_stride(channel, strides[channel])
        result.set_pixel_format(channel, formats[channel])
        result.set_pixel_data(channel, data[channel])
    result.set_qmatrix(qmat)
    result.set_sequence_number(seqnum)
    result.set_subpixel_factor(subpix)
    result.set_exposure_time(expos)
    result.set_disparity_range(*disprange)
    result.set_timestamp(*times)
    result.set_last_sync_pulse(*lastsync)
    return result
cdef class ImageSet:
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet")
    # Wrapped C++ image set (may own or merely reference pixel buffers).
    cdef cpp.ImageSet c_obj
    # Python-side references that keep numpy-backed buffers alive (see __cinit__).
    cdef np.ndarray _numpy_q
    cdef list _numpy_pixels
    cdef bool _touched_internally

    def __cinit__(self):
        # These members are just here to keep alive the refcounted
        # data references for ImageSets created on the Python side
        # (e.g. unpickled data) -- bear in mind the C++ API purely
        # operates on unmanaged raw data pointers into numpy arrays.
        self._numpy_q = None
        self._numpy_pixels = [None]*3 # MAX_SUPPORTED_IMAGES
        # Successfully setting pixel data from Python flags this
        # object, whitelisting later overwriting (which is prevented
        # a priori for all C++-/ImageTransfer-managed objects).
        self._touched_internally = False

    def __reduce__(self):
        # Pickle support: flatten all state to plain Python/numpy values and
        # rebuild via create_image_set_from_reduced_data() on unpickling.
        nimg = self.get_number_of_images()
        return (create_image_set_from_reduced_data, (
            self.get_width(),
            self.get_height(),
            nimg,
            [self.get_index_of(i) for i in [ImageType.IMAGE_LEFT, ImageType.IMAGE_DISPARITY, ImageType.IMAGE_RIGHT]],
            [self.get_row_stride(i) for i in range(nimg)],
            [self.get_pixel_format(i) for i in range(nimg)],
            [self.get_pixel_data_raw(i) for i in range(nimg)],
            self.get_qmatrix(),
            self.get_sequence_number(),
            self.get_subpixel_factor(),
            self.get_exposure_time(),
            self.get_disparity_range(),
            self.get_timestamp(),
            self.get_last_sync_pulse(),
            ))

    def __str__(self):
        w = self.get_width()
        h = self.get_height()
        return f"ImageSet({w}, {h})"
    __repr__ = __str__

    def copy(self):
        '''
        Create a full copy of the ImageSet. All its data is managed by Python (i.e.
        no deallocation attempts by the C++ API will ever take place on this clone).
        '''
        cloned = ImageSet()
        nimg = self.get_number_of_images()
        cloned.set_height(self.get_height())
        cloned.set_width(self.get_width())
        cloned.set_number_of_images(nimg)
        for i in [ImageType.IMAGE_LEFT, ImageType.IMAGE_DISPARITY, ImageType.IMAGE_RIGHT]:
            cloned.set_index_of(i, self.get_index_of(i))
        for i in range(nimg):
            cloned.set_row_stride(i, self.get_row_stride(i))
            cloned.set_pixel_format(i, self.get_pixel_format(i))
            sz = cloned.get_height() * cloned.get_row_stride(i)  # NOTE(review): 'sz' is unused here
            cloned.set_pixel_data(i, self.get_pixel_data_raw(i).copy())
            # this also sets _touched_internally -> data is replaceable
        cloned.set_qmatrix(self.get_qmatrix())
        cloned.set_sequence_number(self.get_sequence_number())
        cloned.set_subpixel_factor(self.get_subpixel_factor())
        cloned.set_exposure_time(self.get_exposure_time())
        a, b = self.get_disparity_range()
        cloned.set_disparity_range(a, b)
        a, b = self.get_timestamp()
        cloned.set_timestamp(a, b)
        a, b = self.get_last_sync_pulse()
        cloned.set_last_sync_pulse(a, b)
        return cloned

    def get_width(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getWidth")
        return self.c_obj.getWidth()

    def get_height(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getHeight")
        return self.c_obj.getHeight()

    def get_row_stride(self, image_number):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getRowStride")
        return self.c_obj.getRowStride(image_number)

    def get_pixel_format(self, what):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getPixelFormat")
        # Accept either an ImageType (resolved to a channel index) or an index.
        image_number = self.get_index_of(what, True) if isinstance(what, ImageType) else int(what)
        return ImageFormat(self.c_obj.getPixelFormat(<int> image_number))

    def get_pixel_data_raw(self, what):
        '''Return a flat uint8 view of the image data of the specified channel (primarily for internal use).'''
        image_number = self.get_index_of(what, True) if isinstance(what, ImageType) else int(what)
        cdef int rowstride = self.c_obj.getRowStride(image_number)
        cdef int h = self.c_obj.getHeight()
        cdef int size = rowstride * h
        np_array = self._pixel_data_as_char_array(image_number, size)
        return np_array

    def get_pixel_data(self, what, force8bit=False, do_copy=True):
        '''
        Obtain a numpy array containing the image data for a channel.

        Args:
            what: The ImageType or image index to retrieve.
            force8bit: optional flag, causes rescaling to 0..255 in case of 12-bit images (dividing by 16).
            do_copy: copy the final array view (default True; primarily for internal use, disable with caution)

        Returns:
            The image data as a copied numpy array; two-dimensional for monochrome images, three-dimensional for RGB.
        '''
        image_number = self.get_index_of(what, True) if isinstance(what, ImageType) else int(what)
        cdef int rowstride = self.c_obj.getRowStride(image_number)
        cdef int w = self.c_obj.getWidth()
        cdef int h = self.c_obj.getHeight()
        cdef int size
        fmt = self.get_pixel_format(image_number)
        if fmt == ImageFormat.FORMAT_12_BIT_MONO:
            # 12-bit data is stored as 16-bit words -> halve the byte count.
            size = (rowstride * h) // 2
            np_array = self._pixel_data_as_short_array(image_number, size)
            np_array = np_array.reshape(h, rowstride//2)
            np_array = np_array[:, :w]
            if force8bit:
                return (np_array // 16).astype(np.uint8) # implicit copy
            else:
                return np_array.copy() if do_copy else np_array
        elif fmt == ImageFormat.FORMAT_8_BIT_RGB:
            size = rowstride * h
            np_array = self._pixel_data_as_char_array(image_number, size)
            np_array = np_array.reshape(h, rowstride//3, 3)
            np_array = np_array[:, :w, :]
            return np_array.copy() if do_copy else np_array
        elif fmt == ImageFormat.FORMAT_8_BIT_MONO:
            size = rowstride * h
            np_array = self._pixel_data_as_char_array(image_number, size)
            np_array = np_array.reshape(h, rowstride)
            np_array = np_array[:, :w]
            return np_array.copy() if do_copy else np_array

    cdef _pixel_data_as_short_array(self, int image_number, int size):
        # Zero-copy uint16 view into the C++ pixel buffer ('size' in elements).
        cdef unsigned char* pointer = self.c_obj.getPixelData(image_number)
        cdef np.uint16_t* short_prt = <np.uint16_t *> pointer
        cdef np.uint16_t[:] myview = <np.uint16_t[:size]> short_prt
        return np.asarray(myview)

    cdef _pixel_data_as_char_array(self, int image_number, int size):
        # Zero-copy uint8 view into the C++ pixel buffer ('size' in bytes).
        cdef unsigned char* pointer = self.c_obj.getPixelData(image_number)
        cdef np.uint8_t[:] char_view = <np.uint8_t[:size]> pointer
        return np.asarray(char_view)

    def get_qmatrix(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getQMatrix")
        # Wrap the C++ float[16] in a non-owning buffer and view it as 4x4.
        cdef view.array ar = view.array(shape=(16, ), itemsize=sizeof(float), format="f", mode="c", allocate_buffer=False)
        cdef const float* pointer = self.c_obj.getQMatrix()
        ar.data = <char*> pointer
        np_array = np.asarray(ar)
        np_array = np_array.reshape(4, 4)
        return np_array

    def get_sequence_number(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getSequenceNumber")
        return self.c_obj.getSequenceNumber()

    def get_timestamp(self):
        '''
        Returns the time at which this image set has been captured.

        Returns:
            sec, usec: A tuple representing the time stamp: the integer seconds, and the
                fractional seconds part in microseconds.
        '''
        # C++ reference out-parameters are returned as a Python tuple.
        cdef int sec = 0
        cdef int usec = 0
        self.c_obj.getTimestamp(sec, usec)
        return sec, usec

    def get_disparity_range(self):
        '''
        Gets the value range for the disparity map contained in this
        image set. If the image set does not contain any disparity data
        then the disparity range is undefined.

        Returns:
            minimum, maximum: The minimum and maximum disparity in the image set.
        '''
        cdef int minimum = 0
        cdef int maximum = 0
        self.c_obj.getDisparityRange(minimum, maximum)
        return minimum, maximum

    def get_subpixel_factor(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getSubpixelFactor")
        return self.c_obj.getSubpixelFactor()

    def write_pgm_file(self, image_number, filename):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::writePgmFile")
        self.c_obj.writePgmFile(image_number, filename.encode())

    def is_image_disparity_pair(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::isImageDisparityPair")
        return self.c_obj.isImageDisparityPair()

    def get_bytes_per_pixel(self, what):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getBytesPerPixel")
        image_number = self.get_index_of(what, True) if isinstance(what, ImageType) else int(what)
        return self.c_obj.getBytesPerPixel(<int> image_number)

    def get_number_of_images(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getNumberOfImages")
        return self.c_obj.getNumberOfImages()

    def get_index_of(self, what, throw_if_not_found=False):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getIndexOf")
        return self.c_obj.getIndexOf(what, throw_if_not_found)

    def has_image_type(self, what):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::hasImageType")
        return self.c_obj.hasImageType(what)

    def get_exposure_time(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::getExposureTime")
        return self.c_obj.getExposureTime()

    def get_last_sync_pulse(self):
        '''
        Gets the timestamp of the last received sync pulse.

        Returns:
            sec, usec: A tuple representing the time stamp: the integer seconds, and the
                fractional seconds part in microseconds.
        '''
        cdef int sec = 0
        cdef int usec = 0
        self.c_obj.getLastSyncPulse(sec, usec)
        return sec, usec

    def set_width(self, width):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setWidth")
        self.c_obj.setWidth(width)

    def set_height(self, height):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setHeight")
        self.c_obj.setHeight(height)

    def set_row_stride(self, image_number, row_stride):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setRowStride")
        self.c_obj.setRowStride(image_number, row_stride)

    def set_pixel_format(self, image_number, image_format):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setPixelFormat")
        self.c_obj.setPixelFormat(image_number, image_format)

    def set_pixel_data(self, image_number, np.ndarray[np.uint8_t, ndim=1, mode="c"] pixel_data):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setPixelData")
        cdef unsigned char* oldptr = self.c_obj.getPixelData(image_number)
        if oldptr != NULL and not self._touched_internally:
            # This is the only kind of data access we actively prevent here.
            # The C++ API (ImageTransfer) would have no way of freeing its own
            # buffers, and would try to free the numpy array data instead!
            # The double check is done because it is OK to replace one numpy
            # array with a different one (not really sensible, but valid).
            raise RuntimeError('Refused to set pixel data: pixel data is managed by the C++ API. Please use copy() or start from an empty ImageSet.')
        self.c_obj.setPixelData(image_number, &pixel_data[0]) # raw pointer is stored (will throw here on invalid index)
        self._numpy_pixels[image_number] = pixel_data # store locally for refcount
        self._touched_internally = True # object is whitelisted for overwriting data

    def set_qmatrix(self, np.ndarray[float, ndim=2, mode="c"] q):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setQMatrix")
        self.c_obj.setQMatrix(&q[0, 0]) # a raw pointer is passed and stored
        self._numpy_q = q # but a reference is stored here to hold a refcount

    def set_sequence_number(self, num):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setSequenceNumber")
        self.c_obj.setSequenceNumber(num)

    def set_timestamp(self, sec, usec):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setTimestamp")
        self.c_obj.setTimestamp(sec, usec)

    def set_disparity_range(self, minimum, maximum):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setDisparityRange")
        self.c_obj.setDisparityRange(minimum, maximum)

    def set_subpixel_factor(self, subpixel_factor):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setSubpixelFactor")
        self.c_obj.setSubpixelFactor(subpixel_factor)

    def set_number_of_images(self, number):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setNumberOfImages")
        cdef unsigned char* oldptr = self.c_obj.getPixelData(0)
        # Changing the number of images with data present could mess up
        # memory management (e.g. by allowing to add numpy data to new
        # channels of C++-managed objects, or preventing necessary frees).
        # Therefore, we allow setting this number only in ImageSets that
        # have only been filled from the Python side.
        if oldptr != NULL and not self._touched_internally:
            raise RuntimeError('Refused to change number of images: pixel data is managed by the C++ API. Please use copy() or start from an empty ImageSet.')
        self.c_obj.setNumberOfImages(number)

    def set_index_of(self, what, idx):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setIndexOf")
        self.c_obj.setIndexOf(what, idx)

    def set_exposure_time(self, time_microsec):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setExposureTime")
        self.c_obj.setExposureTime(time_microsec)

    def set_last_sync_pulse(self, sec, usec):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageSet::setLastSyncPulse")
        return self.c_obj.setLastSyncPulse(sec, usec)
cdef class ImageTransfer:
    '''
    Class for synchronous transfer of image sets.

    This class opens a network socket for delivering or receiving image sets. All
    operations are performed synchronously, which means that they might block.
    The class encapsulates ImageProtocol.

    This class is thread safe for as long as sending and receiving data
    each has its dedicated thread.

    Note for Python version: for best performance, the use of AsyncTransfer
    is recommended for all regular desktop systems.
    '''
    # Heap-allocated C++ transfer object; created in __cinit__, freed in __dealloc__.
    cdef cpp.ImageTransfer* c_obj

    def __cinit__(self, DeviceInfo device, int buffer_size=1048576, int max_udp_packet_size=1472):
        self.c_obj = new cpp.ImageTransfer(device.c_obj, buffer_size, max_udp_packet_size)

    def __dealloc__(self):
        del self.c_obj

    def is_connected(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageTransfer::isConnected")
        return self.c_obj.isConnected()

    def disconnect(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageTransfer::disconnect")
        self.c_obj.disconnect()

    def get_remote_address(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageTransfer::getRemoteAddress")
        cdef string s = self.c_obj.getRemoteAddress()
        return s.decode("utf-8")

    def receive_image_pair(self):
        '''DEPRECATED: Use receive_image_set() instead.'''
        return self.receive_image_set()

    def receive_image_set(self):
        '''
        Waits for and receives a new image set.

        Returns:
            Returns an ImageSet a new image set has been received. Otherwise
            None.

        The received image set is only valid until the next call of receive_image_set.
        The method will not block indefinitely, but return after a short timeout.

        You can use receive() as a Python library convenience wrapper
        for more efficient repolling with custom delay and number of attempts.
        '''
        imp = ImageSet()
        ret = self.c_obj.receiveImageSet(imp.c_obj)
        return imp if ret else None

    def receive(self, timeout=-1, poll_delay=0.001):
        '''
        Python: polling wrapper for receive_image_set.

        Args:
            timeout: The timeout in seconds before returning None unless an
                image arrives. A non-positive timeout means to wait forever.
            poll_delay: The sleep delay to enforce after each polling
                attempt.

        Returns:
            An ImageSet if an image set has been received before the timeout.
            None otherwise.

        On desktop systems, use AsyncTransfer instead for best performance.
        '''
        # Repeatedly poll the short-timeout C++ receive until data arrives
        # or the (wall-clock) deadline passes.
        imp = ImageSet()
        t0 = time.time()
        while timeout <= 0 or (time.time() - t0) < timeout:
            ret = self.c_obj.receiveImageSet(imp.c_obj)
            if ret: return imp
            time.sleep(poll_delay)
        return None

    def get_num_dropped_frames(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::ImageTransfer::getNumDroppedFrames")
        return self.c_obj.getNumDroppedFrames()
cdef class AsyncTransfer:
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::AsyncTransfer")
    # Heap-allocated C++ transfer object; created in __cinit__, freed in __dealloc__.
    cdef cpp.AsyncTransfer* c_obj

    def __cinit__(self, DeviceInfo device, int buffer_size=1048576, int max_udp_packet_size=1472):
        self.c_obj = new cpp.AsyncTransfer(device.c_obj, buffer_size, max_udp_packet_size)

    def __dealloc__(self):
        del self.c_obj

    def is_connected(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::AsyncTransfer::isConnected")
        return self.c_obj.isConnected()

    def disconnect(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::AsyncTransfer::disconnect")
        self.c_obj.disconnect()

    def get_remote_address(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::AsyncTransfer::getRemoteAddress")
        cdef string s = self.c_obj.getRemoteAddress()
        return s.decode("utf-8")

    def collect_received_image_pair(self, timeout=-1):
        '''DEPRECATED: Use collect_received_image_set() instead.'''
        return self.collect_received_image_set(timeout)

    def collect_received_image_set(self, timeout=-1):
        '''
        Collects the asynchronously received image.

        Args:
            timeout: The maximum time in seconds for which to wait if no
                image set has been received yet.

        Returns:
            An ImageSet if an image set has been received before the timeout.
            If no image set has been received, this method might block or return None.
            Otherwise the returned image set is valid until the next call.

        If timeout is set to a value < 0, the function will block indefinitely.
        If timeout = 0, the function will return immediately, and if timeout is > 0 then
        the function will block for the given amount of time in seconds. The received
        image set is only valid until the next call of this function.
        '''
        imp = ImageSet()
        ret = self.c_obj.collectReceivedImageSet(imp.c_obj, timeout)
        return imp if ret else None

    def get_num_dropped_frames(self):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::AsyncTransfer::getNumDroppedFrames")
        return self.c_obj.getNumDroppedFrames()
cdef class Reconstruct3D:
    _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::Reconstruct3D")

    # Wrapped C++ object held by value. The float buffers returned by its
    # createPointMap()/createZMap() are owned by the C++ object and remain
    # valid only until the next call that reuses them.
    cdef cpp.Reconstruct3D c_obj

    def create_point_map_from_disparity_data(self, disp_map_data, width, height, row_stride, q, min_disparity=1, subpixel_factor=16, max_z=0, max_disparity=0xFFF):
        '''
        Reconstructs the 3D location of each pixel in the given disparity map,
        with custom parameters.

        Args:
            disp_map_data: Data of the disparity map (unsigned short array). The
                disparity map is assumed to have a N-bit subpixel resolution.
                This means that each value needs to be divided by the subpixel factor
                to receive the true disparity.
            width, height: Disparity map dimensions
            row_stride: Row stride (i.e. distance between two rows in bytes)
                of the disparity map.
            q: Disparity-to-depth mapping matrix of size 4x4. The matrix is
                stored in a row-wise alignment. Obtain this matrix from your
                camera calibration data.
            min_disparity: The minimum disparity, again with N-bit subpixel
                resolution. Lower disparities will be clamped to this value
                before computing the 3D location (default 1).
            subpixel_factor: Subpixel division factor for disparity value
                (default 16)
            max_z: (Python specific) Filter the numpy array to only return
                points closer than specified value. A non-positive value means
                no filtering (default).
            max_disparity: The maximum value that occurs in the disparity map. Any value
                greater or equal will be marked as invalid.

        Returns:
            A numpy array of size [:,3] containing the 3D points corresponding to the disparity map.
            Please refer to the C++ API docs for further details.
        '''
        # One (x, y, z, w) float quadruple is produced per pixel.
        cdef int size = width * height * 4
        # Zero-copy typed views of the caller-provided inputs; disp_map_data
        # must expose a C-contiguous 2D unsigned short buffer.
        cdef unsigned short[:, ::1] disp_map_arr = disp_map_data
        cdef float[:, ::1] q_arr = q.astype(np.float32)
        cdef float* point_map_data = self.c_obj.createPointMap(&disp_map_arr[0, 0], width, height, row_stride, &q_arr[0, 0], min_disparity, subpixel_factor, max_disparity)
        # Wrap the C++-owned buffer without copying (allocate_buffer=False);
        # validity ends with the next createPointMap() call.
        cdef view.array arr = view.array(shape=(size,), itemsize=sizeof(float), format="f", mode="c", allocate_buffer=False)
        arr.data = <char*> point_map_data
        np_array = np.asarray(arr)
        # Points are homogeneous (x, y, z, w); drop the w component.
        np_array = np_array.reshape(height * width, 4)
        np_array = np_array[:, :3]
        if max_z > 0:
            np_array = np_array[np_array[:, 2] < max_z]
        return np_array

    def create_point_map(self, ImageSet image_set, min_disparity=1, max_z=0):
        '''
        Reconstructs the 3D location of each pixel using the disparity map
        and metadata of the given image set.

        Args:
            image_set: Image set containing the disparity map.
            min_disparity: The minimum disparity with 4-bit subpixel resolution.
            max_z: (Python specific) Filter the numpy array to only return
                points closer than specified value. A non-positive value means
                no filtering (default).

        Returns:
            A numpy array of size [:,3] containing the 3D points corresponding to the disparity map.
            Please refer to the C++ API docs for further details.
        '''
        cdef int w = image_set.c_obj.getWidth()
        cdef int h = image_set.c_obj.getHeight()
        # One (x, y, z, w) float quadruple per pixel.
        cdef int size = w * h * 4
        cdef float* point_map_data = self.c_obj.createPointMap(image_set.c_obj, min_disparity)
        # Wrap the C++-owned buffer without copying.
        cdef view.array arr = view.array(shape=(size,), itemsize=sizeof(float), format="f", mode="c", allocate_buffer=False)
        arr.data = <char*> point_map_data
        np_array = np.asarray(arr)
        # Drop the homogeneous w component.
        np_array = np_array.reshape(h * w, 4)
        np_array = np_array[:, :3]
        if max_z > 0:
            np_array = np_array[np_array[:, 2] < max_z]
        return np_array

    def create_point_map_and_color_map(self, ImageSet image_set, min_disparity=1, max_z=0):
        '''
        Reconstructs the 3D location of each pixel using the disparity map
        and metadata of the given image set, alongside their colors.

        Args:
            image_set: Image set containing the disparity map.
            min_disparity: The minimum disparity with 4-bit subpixel resolution.
            max_z: (Python specific) Filter the numpy array to only return
                points closer than specified value. A non-positive value means
                no filtering (default).

        Returns:
            Two numpy arrays of identical size [:,3], the first containing the 3D points corresponding
            to the disparity map, and the second one their colors as float RGB triplets (or None if
            the ImageSet is disparity-only).
        '''
        cdef int w = image_set.c_obj.getWidth()
        cdef int h = image_set.c_obj.getHeight()
        # One (x, y, z, w) float quadruple per pixel.
        cdef int size = w * h * 4
        cdef float* point_map_data = self.c_obj.createPointMap(image_set.c_obj, min_disparity)
        # Wrap the C++-owned buffer without copying.
        cdef view.array arr = view.array(shape=(size,), itemsize=sizeof(float), format="f", mode="c", allocate_buffer=False)
        arr.data = <char*> point_map_data
        coords = np.asarray(arr)
        coords = coords.reshape(h * w, 4)
        coords = coords[:, :3]
        pix = None
        if image_set.has_image_type(ImageType.IMAGE_LEFT):
            pix = image_set.get_pixel_data(ImageType.IMAGE_LEFT, force8bit=True, do_copy=False)
            if len(pix.shape)==2: pix = np.stack([pix]*3, 2) # Expand grayscale to rgb triplets
            # Normalize 8-bit channels to [0, 1] floats, one RGB row per point.
            pix = pix.reshape((-1, 3)).astype(np.float64) / 255.0
        if max_z > 0:
            # Filter colors with the unfiltered z column first, then filter
            # the coordinates themselves, so both arrays stay in sync.
            if pix is not None:
                pix = pix[coords[:, 2] < max_z]
            coords = coords[coords[:, 2] < max_z]
        return coords, pix

    def create_open3d_pointcloud(self, ImageSet image_set, min_disparity=1, max_z=0):
        '''
        Convenience wrapper to directly return an Open3D point cloud for an image set.

        Args:
            image_set: Image set containing the disparity map.
            min_disparity: The minimum disparity with 4-bit subpixel resolution.
            max_z: (Python specific) Filter the point cloud data to only return
                points closer than specified value. A non-positive value means
                no filtering (default).

        Returns:
            An open3d.geometry.PointCloud for the (filtered) coordinates from the ImageSet.
            Contains color information unless the ImageSet was disparity-only.
        '''
        # Imported lazily so open3d stays an optional dependency.
        import open3d
        pointmap, colors = self.create_point_map_and_color_map(image_set, min_disparity=min_disparity, max_z=max_z)
        pcd = open3d.geometry.PointCloud(open3d.utility.Vector3dVector(pointmap))
        if colors is not None:
            pcd.colors = open3d.utility.Vector3dVector(colors)
        return pcd

    def create_open3d_rgbd_image(self, ImageSet image_set, min_disparity=1, depth_trunc=3.0, depth_scale=1.0):
        '''
        Convenience wrapper to directly return an Open3D RGBD image for an ImageSet.
        Raises a RuntimeError when called with a disparity-only image set.

        Args:
            image_set: Image set containing the disparity map.
            min_disparity: The minimum disparity with 4-bit subpixel resolution.
            depth_trunc: (Open3D argument, relayed) Filter the depth channel to
                zero-clamp points more distant than the specified value (default 3.0).
            depth_scale: (Open3D argument, relayed) Scale factor applied to the
                depth channel (default 1.0).

        Returns:
            An open3d.geometry.RGBDImage for the image set.
        '''
        # Imported lazily so open3d stays an optional dependency.
        import open3d
        if not image_set.has_image_type(ImageType.IMAGE_LEFT):
            raise RuntimeError('Cannot create an RGBD image - no left image data in ImageSet')
        cdef int w = image_set.c_obj.getWidth()
        cdef int h = image_set.c_obj.getHeight()
        cdef float* z_data = self.c_obj.createZMap(image_set.c_obj, minDisparity=min_disparity, maxDisparity=0xFFF)
        # Wrap the C++-owned z buffer without copying, then copy via astype.
        cdef view.array arr = view.array(shape=(h, w,), itemsize=sizeof(float), format="f", mode="c", allocate_buffer=False)
        arr.data = <char*> z_data
        depth = np.asarray(arr).astype(np.float32)
        color = image_set.get_pixel_data(ImageType.IMAGE_LEFT, force8bit=True)
        # NOTE(review): uses the open3d.cpu.pybind module path directly —
        # presumably unavailable in CUDA builds of Open3D; confirm.
        img = open3d.geometry.RGBDImage.create_from_color_and_depth(
            open3d.cpu.pybind.geometry.Image(color),
            open3d.cpu.pybind.geometry.Image(depth),
            depth_scale=depth_scale, depth_trunc=depth_trunc)
        return img

    def project_single_point(self, point_x, point_y, disparity, q, subpix_factor):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::Reconstruct3D::projectSinglePoint", "PYTHON NOTE: Returns a tuple (pointX, pointY, pointZ). Please ignore those C++ reference arguments.")
        # The C++ API returns the projected point through reference output
        # parameters; these locals receive the result.
        cdef float proj_x = 0
        cdef float proj_y = 0
        cdef float proj_z = 0
        cdef float[:, ::1] q_arr = q.astype(np.float32)
        self.c_obj.projectSinglePoint(point_x, point_y, disparity, &q_arr[0, 0], proj_x, proj_y, proj_z, subpix_factor)
        return proj_x, proj_y, proj_z

    def write_ply_file(self, filename, ImageSet image_set, double max_z=sys.float_info.max, bool binary=False):
        _SUBSTITUTE_DOCSTRING_FOR_("visiontransfer::Reconstruct3D::writePlyFile")
        # The filename str is encoded to bytes for the C++ char* parameter.
        self.c_obj.writePlyFile(filename.encode(), image_set.c_obj, max_z, binary)

View file

@ -0,0 +1,161 @@
###############################################################################/
# Copyright (c) 2021 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
###############################################################################/
from libcpp.vector cimport vector
from libcpp.string cimport string
from libcpp cimport bool
# ---------------------------------------------------------------------------
# Enum declarations mirrored from the visiontransfer C++ headers. Each enum
# is declared from the namespace of its enclosing class so the bare
# enumerator names become usable from Cython.
# ---------------------------------------------------------------------------

cdef extern from "visiontransfer/deviceinfo.h" namespace "visiontransfer::DeviceInfo::DeviceModel":
    cdef enum DeviceModel "visiontransfer::DeviceInfo::DeviceModel":
        SCENESCAN
        SCENESCAN_PRO

cdef extern from "visiontransfer/deviceinfo.h" namespace "visiontransfer::DeviceInfo::NetworkProtocol":
    cdef enum NetworkProtocol "visiontransfer::DeviceInfo::NetworkProtocol":
        PROTOCOL_TCP
        PROTOCOL_UDP

cdef extern from "visiontransfer/imageprotocol.h" namespace "visiontransfer::ImageProtocol::ProtocolType":
    cdef enum ProtocolType "visiontransfer::ImageProtocol::ProtocolType":
        PROTOCOL_TCP
        PROTOCOL_UDP

cdef extern from "visiontransfer/imageset.h" namespace "visiontransfer::ImageSet::ImageFormat":
    cdef enum ImageFormat "visiontransfer::ImageSet::ImageFormat":
        FORMAT_8_BIT_MONO
        FORMAT_8_BIT_RGB
        FORMAT_12_BIT_MONO

cdef extern from "visiontransfer/imageset.h" namespace "visiontransfer::ImageSet::ImageType":
    cdef enum ImageType "visiontransfer::ImageSet::ImageType":
        IMAGE_UNDEFINED
        IMAGE_LEFT
        IMAGE_DISPARITY
        IMAGE_RIGHT

cdef extern from "visiontransfer/deviceparameters.h" namespace "visiontransfer::DeviceParameters::AutoMode":
    cdef enum AutoMode "visiontransfer::DeviceParameters::AutoMode":
        AUTO_EXPOSURE_AND_GAIN
        AUTO_EXPOSURE_MANUAL_GAIN
        MANUAL_EXPOSURE_AUTO_GAIN
        MANUAL_EXPOSURE_MANUAL_GAIN

cdef extern from "visiontransfer/deviceparameters.h" namespace "visiontransfer::DeviceParameters::OperationMode":
    cdef enum OperationMode "visiontransfer::DeviceParameters::OperationMode":
        PASS_THROUGH
        RECTIFY
        STEREO_MATCHING

cdef extern from "visiontransfer/deviceparameters.h" namespace "visiontransfer::DeviceParameters::TargetFrame":
    cdef enum TargetFrame "visiontransfer::DeviceParameters::TargetFrame":
        LEFT_FRAME
        RIGHT_FRAME
        BOTH_FRAMES

# ---------------------------------------------------------------------------
# Class declarations. 'except +' makes Cython translate thrown C++
# exceptions into Python exceptions instead of crashing.
# ---------------------------------------------------------------------------

cdef extern from "visiontransfer/deviceinfo.h" namespace "visiontransfer":
    cdef cppclass DeviceStatus:
        DeviceStatus() except +
        bool isValid() except +
        double getLastFps() except +
        unsigned int getJumboMtu() except +
        unsigned int getJumboFramesEnabled() except +
        string getCurrentCaptureSource() except +

cdef extern from "visiontransfer/deviceinfo.h" namespace "visiontransfer":
    cdef cppclass DeviceInfo:
        DeviceInfo() except +
        string getIpAddress() except +
        NetworkProtocol getNetworkProtocol() except +
        string getFirmwareVersion() except +
        DeviceModel getModel() except +
        DeviceStatus getStatus() except +
        bool isCompatible() except +
        string toString() except +

cdef extern from "visiontransfer/deviceenumeration.h" namespace "visiontransfer":
    cdef cppclass DeviceEnumeration:
        DeviceEnumeration() except +
        vector[DeviceInfo] discoverDevices() except +

cdef extern from "visiontransfer/imageset.h" namespace "visiontransfer":
    cdef cppclass ImageSet:
        ImageSet() except +
        int getWidth() except +
        int getHeight() except +
        int getRowStride(int imageNumber) except +
        ImageFormat getPixelFormat(int imageNumber) except +
        ImageFormat getPixelFormat(ImageType what) except +
        unsigned char* getPixelData(int imageNumber) except +
        const float* getQMatrix() except +
        int getSequenceNumber() except +
        void getTimestamp(int& seconds, int& microsec) except +
        void getDisparityRange(int& minimum, int& maximum) except +
        int getSubpixelFactor() except +
        bool isImageDisparityPair() except +
        int getBytesPerPixel(int imageNumber) except +
        int getBitsPerPixel(int imageNumber) except +
        int getNumberOfImages() except +
        int getIndexOf(ImageType what, bool throwIfNotFound) except +
        bool hasImageType(ImageType what) except +
        int getExposureTime() except +
        void getLastSyncPulse(int& seconds, int& microsec) except +
        # Setters, primarily for deserialization
        void setWidth(int width) except +
        void setHeight(int height) except +
        void setRowStride(int imageNumber, int rowStride) except +
        void setPixelFormat(int imageNumber, ImageFormat imageFormat) except +
        void setPixelData(int imageNumber, unsigned char* pixelData) except +
        void setQMatrix(const float* q) except +
        void setSequenceNumber(unsigned int num) except +
        void setTimestamp(int seconds, int microseconds) except +
        void setDisparityRange(int minimum, int maximum) except +
        void setSubpixelFactor(int subpixFact) except +
        void setNumberOfImages(int number) except +
        void setIndexOf(ImageType what, int idx) except +
        void setExposureTime(int timeMicrosec) except +
        void setLastSyncPulse(int seconds, int microsec) except +
        # Utility functions
        void writePgmFile(int imageNumber, const char* fileName) except +

cdef extern from "visiontransfer/imagetransfer.h" namespace "visiontransfer":
    cdef cppclass ImageTransfer:
        ImageTransfer(const DeviceInfo& device, int bufferSize, int maxUdpPacketSize) except +
        bool receiveImageSet(ImageSet& imageSet) except +
        int getNumDroppedFrames() except +
        bool isConnected() except +
        void disconnect() except +
        string getRemoteAddress() except +

cdef extern from "visiontransfer/asynctransfer.h" namespace "visiontransfer":
    cdef cppclass AsyncTransfer:
        AsyncTransfer(const DeviceInfo& device, int bufferSize, int maxUdpPacketSize) except +
        bool collectReceivedImageSet(ImageSet& imageSet, double timeout) except +
        int getNumDroppedFrames() except +
        bool isConnected() except +
        void disconnect() except +
        string getRemoteAddress() except +

cdef extern from "visiontransfer/reconstruct3d.h" namespace "visiontransfer":
    cdef cppclass Reconstruct3D:
        Reconstruct3D() except +
        float* createPointMap(const unsigned short* dispMap, int width, int height, int rowStride, const float* q, unsigned short minDisparity, int subpixelFactor, unsigned short maxDisparity) except +
        void projectSinglePoint(int imageX, int imageY, unsigned short disparity, const float* q, float& pointX, float& pointY, float& pointZ, int subpixFactor) except +
        float* createPointMap(const ImageSet& imageSet, unsigned short minDisparity) except +
        void writePlyFile(const char* file, const ImageSet& imageSet, double maxZ, bool binary) except +
        float* createZMap(const ImageSet& imageSet, unsigned short minDisparity, unsigned short maxDisparity) except +

# Also include auto-generated parameter glue code
include "visiontransfer_parameters_cpp_autogen.pxd"

View file

@ -0,0 +1,81 @@
# Public headers of the visiontransfer library; installed into
# include/visiontransfer by the install() rule below.
set(HEADERS
    asynctransfer.h
    imageprotocol.h
    imagetransfer.h
    common.h
    reconstruct3d.h
    reconstruct3d-pcl.h
    reconstruct3d-open3d.h
    alignedallocator.h
    datablockprotocol.h
    imageset.h
    imageset-opencv.h
    imagepair.h
    bitconversions.h
    datachannelservicebase.h
    datachannelservice.h
    datachannel-imu-bno080.h
    protocol-sh2-imu-bno080.h
    deviceinfo.h
    deviceenumeration.h
    internalinformation.h
    parameterinfo.h
    parametertransfer.h
    parametertransferdata.h
    deviceparameters.h
    scenescanparameters.h
    standardparameterids.h
    networking.h
    sensorringbuffer.h
)
# Implementation files compiled into every library variant below.
set(SOURCES
    asynctransfer.cpp
    imageprotocol.cpp
    imagetransfer.cpp
    reconstruct3d.cpp
    datablockprotocol.cpp
    imageset.cpp
    bitconversions.cpp
    datachannelservicebase.cpp
    datachannelservice.cpp
    datachannel-imu-bno080.cpp
    deviceenumeration.cpp
    internalinformation.cpp
    parameterinfo.cpp
    parametertransfer.cpp
    deviceparameters.cpp
    standardparameterids.cpp
    networking.cpp
)
# Build static and shared version
add_library(visiontransfer${LIB_SUFFIX} SHARED
    ${HEADERS}
    ${SOURCES}
)
add_library(visiontransfer-static${LIB_SUFFIX} STATIC
    ${HEADERS}
    ${SOURCES}
)

# Optional extra shared library built with the pre-C++11 ABI
# (_GLIBCXX_USE_CXX11_ABI=0), needed when linking against binaries such as
# official Open3D releases that were built with the old ABI.
# Test the variable by name: the previous form if(${BUILD_WITHOUT_CXX11_ABI})
# expands to if() when the variable is unset, which is a CMake error.
if(BUILD_WITHOUT_CXX11_ABI)
    add_library(visiontransfer${OPEN3D_LIB_SUFFIX} SHARED
        ${HEADERS}
        ${SOURCES}
    )
    set_target_properties(visiontransfer${OPEN3D_LIB_SUFFIX} PROPERTIES COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()
# Windows needs Winsock (ws2_32) and the IP helper API; elsewhere the static
# library is built with -fPIC so it can be linked into shared objects
# (e.g. Python extension modules).
if(WIN32)
    target_link_libraries(visiontransfer${LIB_SUFFIX} ws2_32 Iphlpapi)
    target_link_libraries(visiontransfer-static${LIB_SUFFIX} ws2_32 Iphlpapi)
else()
    set_target_properties(visiontransfer-static${LIB_SUFFIX}
        PROPERTIES COMPILE_FLAGS "-fPIC")
endif()

# Install both library variants and the public headers.
install(TARGETS visiontransfer${LIB_SUFFIX} DESTINATION lib)
install(TARGETS visiontransfer-static${LIB_SUFFIX} DESTINATION lib)
install(FILES ${HEADERS} DESTINATION include/visiontransfer)

View file

@ -0,0 +1,100 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef ALIGNEDALLOCATOR_H
#define ALIGNEDALLOCATOR_H
#include <cstdlib>
#include <memory>
#include <limits>
namespace visiontransfer {
namespace internal {
/**
 * \brief STL-compatible allocator for memory-aligned allocations
 *
 * This is a helper class that is used internally for allocating memory
 * that can be used with aligned SSE / AVX instructions.
 */
template<typename T, int alignment = 32>
class AlignedAllocator {
public :
    // Typedefs
    typedef T value_type;
    typedef value_type* pointer;
    typedef const value_type* const_pointer;
    typedef value_type& reference;
    typedef const value_type& const_reference;
    typedef std::size_t size_type;
    typedef std::ptrdiff_t difference_type;

    // convert an allocator<T> to allocator<U>
    // The alignment parameter is propagated: previously rebind fell back to
    // the default alignment, silently under-aligning rebound allocations.
    template<typename U>
    struct rebind {
        typedef AlignedAllocator<U, alignment> other;
    };

    // Copy and converting constructors must be implicit: the C++ Allocator
    // requirements demand implicit copies (containers copy and rebind
    // allocators), so 'explicit' is not used here.
    AlignedAllocator() {}
    ~AlignedAllocator() {}
    AlignedAllocator(AlignedAllocator const&) {}
    template<typename U>
    AlignedAllocator(AlignedAllocator<U, alignment> const&) {}

    // Address
    inline pointer address(reference r) { return &r; }
    inline const_pointer address(const_reference r) { return &r; }

    /**
     * Allocates memory for cnt objects of type T, aligned to 'alignment'
     * bytes. The unused hint parameter keeps the classic allocator
     * signature ('const void*' instead of the C++20-removed
     * std::allocator<void>::const_pointer).
     */
    pointer allocate(size_type cnt, const void* = 0) {
        // Allocate with enough slack to align the pointer and to store the
        // alignment offset in the byte just before the aligned block
        unsigned char* ptr = new unsigned char[sizeof(T) * cnt + (alignment-1) + 1];
        unsigned char* alignedPtr = reinterpret_cast<unsigned char*>((size_t(ptr + 1) + alignment-1) & -alignment);

        // Store offset in allocated memory area
        alignedPtr[-1] = static_cast<unsigned char>(alignedPtr - ptr);
        return reinterpret_cast<pointer>(alignedPtr);
    }

    void deallocate(pointer p, size_type) {
        if(p != nullptr) {
            // Get address of unaligned pointer
            unsigned char* alignedPtr = reinterpret_cast<unsigned char*>(p);
            unsigned char* unalignedPtr = alignedPtr - alignedPtr[-1];

            // The buffer was obtained with a new[] expression, so it must be
            // released with delete[] (the previous ::operator delete[] call
            // mismatched the allocation and was undefined behavior).
            delete[] unalignedPtr;
        }
    }

    // Size
    size_type max_size() const {
        return std::numeric_limits<size_type>::max() / sizeof(T);
    }

    // Construction: placement-new a copy of t at p
    void construct(pointer p, const T& t) {
        new(p) T(t);
    }

    // Destruction: explicit destructor call, memory is not released
    void destroy(pointer p) {
        p->~T();
    }
};
}} // namespace
#endif

View file

@ -0,0 +1,445 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#if __GNUC__ == 4 && __GNUC_MINOR__ < 9
// This is a very ugly workaround for GCC bug 54562. If omitted,
// passing timeouts to collectReceivedImage() is broken.
#include <bits/c++config.h>
#undef _GLIBCXX_USE_CLOCK_MONOTONIC
#endif
#include <iostream>
#include <functional>
#include <stdexcept>
#include <thread>
#include <condition_variable>
#include <chrono>
#include <mutex>
#include <vector>
#include <cstring>
#include <algorithm>
#include "visiontransfer/asynctransfer.h"
#include "visiontransfer/alignedallocator.h"
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
/*************** Pimpl class containing all private members ***********/

// Private implementation of AsyncTransfer. Wraps a blocking ImageTransfer
// and runs up to two worker threads: one transmitting queued image sets and
// one receiving image sets into a small ring of pixel buffers.
class AsyncTransfer::Pimpl {
public:
    Pimpl(const char* address, const char* service,
        ImageProtocol::ProtocolType protType, bool server,
        int bufferSize, int maxUdpPacketSize);
    ~Pimpl();

    // Redeclaration of public members
    void sendImageSetAsync(const ImageSet& imageSet, bool deleteData);
    bool collectReceivedImageSet(ImageSet& imageSet, double timeout);
    int getNumDroppedFrames() const;
    bool isConnected() const;
    void disconnect();
    std::string getRemoteAddress() const;
    bool tryAccept();

private:
    // Size of the receive pixel-buffer ring; receiveLoop() advances through
    // it by getNumberOfImages() per received set.
    static constexpr int NUM_BUFFERS = 6;
    static constexpr int SEND_THREAD_SHORT_WAIT_MS = 1;
    static constexpr int SEND_THREAD_LONG_WAIT_MS = 10;

    // The encapsulated image transfer object
    ImageTransfer imgTrans;

    // Variable for controlling thread termination
    volatile bool terminate;

    // There are two threads, one for sending and one for receiving.
    // Each has a mutex and condition variable for synchronization.
    std::thread sendThread;
    std::mutex sendMutex;
    std::condition_variable sendCond;
    std::condition_variable sendWaitCond;

    std::thread receiveThread;
    // A timed_mutex lets collectReceivedImageSet() honor its timeout while
    // acquiring the lock; hence the condition_variable_any companions.
    std::timed_mutex receiveMutex;
    std::condition_variable_any receiveCond;
    std::condition_variable_any receiveWaitCond;

    // Objects for exchanging images with the send and receive threads
    ImageSet receivedSet;
    std::vector<unsigned char, AlignedAllocator<unsigned char> > receivedData[NUM_BUFFERS];
    bool newDataReceived;
    ImageSet sendImageSet;
    bool sendSetValid;
    bool deleteSendData;

    // Exception occurred in one of the threads; rethrown into the caller
    // on the next public-API call.
    std::exception_ptr receiveException;
    std::exception_ptr sendException;

    bool sendThreadCreated;
    bool receiveThreadCreated;

    // Main loop for sending thread
    void sendLoop();

    // Main loop for receiving
    void receiveLoop();

    void createSendThread();
};
/******************** Stubs for all public members ********************/

// All public AsyncTransfer members simply forward to the Pimpl object.

AsyncTransfer::AsyncTransfer(const char* address, const char* service,
        ImageProtocol::ProtocolType protType, bool server,
        int bufferSize, int maxUdpPacketSize)
    : pimpl(new Pimpl(address, service, protType, server, bufferSize, maxUdpPacketSize)) {
}

// Convenience constructor: connects as a client to the device described by
// the DeviceInfo, using port 7681 and the device's network protocol.
AsyncTransfer::AsyncTransfer(const DeviceInfo& device, int bufferSize, int maxUdpPacketSize)
    : pimpl(new Pimpl(device.getIpAddress().c_str(), "7681", static_cast<ImageProtocol::ProtocolType>(device.getNetworkProtocol()),
        false, bufferSize, maxUdpPacketSize)) {
}

AsyncTransfer::~AsyncTransfer() {
    delete pimpl;
}

void AsyncTransfer::sendImageSetAsync(const ImageSet& imageSet, bool deleteData) {
    pimpl->sendImageSetAsync(imageSet, deleteData);
}

bool AsyncTransfer::collectReceivedImageSet(ImageSet& imageSet, double timeout) {
    return pimpl->collectReceivedImageSet(imageSet, timeout);
}

int AsyncTransfer::getNumDroppedFrames() const {
    return pimpl->getNumDroppedFrames();
}

bool AsyncTransfer::isConnected() const {
    return pimpl->isConnected();
}

void AsyncTransfer::disconnect() {
    return pimpl->disconnect();
}

std::string AsyncTransfer::getRemoteAddress() const {
    return pimpl->getRemoteAddress();
}

bool AsyncTransfer::tryAccept() {
    return pimpl->tryAccept();
}
/******************** Implementation in pimpl class *******************/

AsyncTransfer::Pimpl::Pimpl(const char* address, const char* service,
        ImageProtocol::ProtocolType protType, bool server,
        int bufferSize, int maxUdpPacketSize)
    : imgTrans(address, service, protType, server, bufferSize, maxUdpPacketSize),
        terminate(false), newDataReceived(false), sendSetValid(false),
        deleteSendData(false), sendThreadCreated(false),
        receiveThreadCreated(false) {
    if(server) {
        // In server mode the send thread is started immediately (its loop
        // keeps calling imgTrans.transferData()); clients create their
        // worker threads lazily on first use.
        createSendThread();
    }
}
AsyncTransfer::Pimpl::~Pimpl() {
    // Signal both worker threads to exit and wake every waiter so that the
    // join() calls below cannot block on a condition variable.
    terminate = true;

    sendCond.notify_all();
    receiveCond.notify_all();
    sendWaitCond.notify_all();
    receiveWaitCond.notify_all();

    if(sendThreadCreated && sendThread.joinable()) {
        sendThread.join();
    }

    if(receiveThreadCreated && receiveThread.joinable()) {
        receiveThread.join();
    }

    // If an image set was queued but never consumed by the send thread, we
    // still own its pixel buffers. Free every image of the set — matching
    // the cleanup in sendLoop() — instead of only images 0 and 1, which
    // leaked (or over-indexed) sets with a different image count.
    if(sendSetValid && deleteSendData) {
        for(int i = 0; i < sendImageSet.getNumberOfImages(); i++) {
            delete[] sendImageSet.getPixelData(i);
        }
    }
}
void AsyncTransfer::Pimpl::createSendThread() {
    // Lazy initialization of the send thread as it is not always needed.
    // The creation flag is tested while holding sendMutex: the previous
    // check-before-lock version allowed two concurrent callers to both see
    // 'false' and spawn two threads, overwriting (and thereby terminating
    // the program via std::thread's destructor semantics) the first handle.
    unique_lock<mutex> lock(sendMutex);
    if(!sendThreadCreated) {
        sendThread = thread(bind(&AsyncTransfer::Pimpl::sendLoop, this));
        sendThreadCreated = true;
    }
}
// Queues one image set for asynchronous transmission. Blocks only while a
// previously queued set has not yet been picked up by the send thread.
void AsyncTransfer::Pimpl::sendImageSetAsync(const ImageSet& imageSet, bool deleteData) {
    createSendThread();

    while(true) {
        unique_lock<mutex> lock(sendMutex);

        // Test for errors: rethrow an exception captured in the send
        // thread into the caller's context.
        if(sendException) {
            std::rethrow_exception(sendException);
        }

        if(!sendSetValid) {
            // Slot is free: store a copy of the image set. If deleteData is
            // set, ownership of the pixel buffers passes to the send thread
            // (presumably ImageSet copies share pixel pointers — confirm
            // against ImageSet's copy semantics).
            sendImageSet = imageSet;
            sendSetValid = true;
            deleteSendData = deleteData;

            // Wake up the sender thread
            sendCond.notify_one();
            return;
        } else {
            // Wait for old data to be processed first
            sendWaitCond.wait(lock);
        }
    }
}
// Fetches the most recently received image set, honoring the timeout
// contract: timeout < 0 blocks indefinitely, 0 returns immediately,
// > 0 waits for at most that many seconds (including lock acquisition).
bool AsyncTransfer::Pimpl::collectReceivedImageSet(ImageSet& imageSet, double timeout) {
    if(!receiveThreadCreated) {
        // Lazy initialization of receive thread
        unique_lock<timed_mutex> lock(receiveMutex);
        receiveThreadCreated = true;
        receiveThread = thread(bind(&AsyncTransfer::Pimpl::receiveLoop, this));
    }

    // Acquire mutex
    unique_lock<timed_mutex> lock(receiveMutex, std::defer_lock);
    if(timeout < 0) {
        lock.lock();
    } else {
        // Bounded lock acquisition; time spent here is charged against the
        // caller's timeout budget below.
        std::chrono::steady_clock::time_point lockStart =
            std::chrono::steady_clock::now();
        if(!lock.try_lock_for(std::chrono::microseconds(static_cast<unsigned int>(timeout*1e6)))) {
            // Timed out
            return false;
        }

        // Update timeout
        unsigned int lockDuration = static_cast<unsigned int>(std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - lockStart).count());
        timeout = std::max(0.0, timeout - lockDuration*1e-6);
    }

    // Test for errors captured in the receive thread
    if(receiveException) {
        std::rethrow_exception(receiveException);
    }

    if(timeout == 0 && !newDataReceived) {
        // No image has been received and we are not blocking
        return false;
    }

    // If there is no data yet then keep on waiting
    if(!newDataReceived) {
        if(timeout < 0) {
            // Loop guards against spurious wake-ups when blocking forever.
            while(!terminate && !receiveException && !newDataReceived) {
                receiveCond.wait(lock);
            }
        } else {
            // NOTE(review): single wait_for — a spurious wake-up before the
            // deadline makes this call return false early; confirm callers
            // tolerate an occasional premature false.
            receiveCond.wait_for(lock, std::chrono::microseconds(static_cast<unsigned int>(timeout*1e6)));
        }
    }

    // Test for errors again
    if(receiveException) {
        std::rethrow_exception(receiveException);
    }

    if(newDataReceived) {
        // Get the received image and free the slot so the receive thread
        // can publish the next set.
        imageSet = receivedSet;
        newDataReceived = false;
        receiveWaitCond.notify_one();

        return true;
    } else {
        return false;
    }
}
// Worker loop of the send thread: repeatedly pumps imgTrans.transferData()
// while idle, and transmits each image set queued by sendImageSetAsync().
void AsyncTransfer::Pimpl::sendLoop() {
    {
        // Delay the thread start until createSendThread() releases the mutex
        unique_lock<mutex> lock(sendMutex);
    }

    ImageSet imgSet;
    bool deleteSet = false;

    try {
        while(!terminate) {
            // Wait for next image
            {
                unique_lock<mutex> lock(sendMutex);
                // Wait for next frame to be queued; keep pumping
                // transferData() while waiting. The first wait is short to
                // stay responsive, later waits are longer to reduce load.
                bool firstWait = true;
                while(!terminate && !sendSetValid) {
                    imgTrans.transferData();
                    sendCond.wait_for(lock, std::chrono::milliseconds(
                        firstWait ? SEND_THREAD_SHORT_WAIT_MS : SEND_THREAD_LONG_WAIT_MS));
                    firstWait = false;
                }

                if(!sendSetValid) {
                    continue;
                }

                // Take ownership of the queued set and free the slot
                imgSet = sendImageSet;
                deleteSet = deleteSendData;
                sendSetValid = false;
                sendWaitCond.notify_one();
            }

            if(!terminate) {
                imgTrans.setTransferImageSet(imgSet);
                imgTrans.transferData();
            }

            if(deleteSet) {
                // Ownership of the pixel buffers was passed to us
                for (int i=0; i<imgSet.getNumberOfImages(); ++i) {
                    delete[] imgSet.getPixelData(i);
                }
                deleteSet = false;
            }
        }
    } catch(...) {
        // Store the exception for later; it is rethrown to the caller in
        // sendImageSetAsync().
        if(!sendException) {
            sendException = std::current_exception();
        }
        sendWaitCond.notify_all();

        // Don't forget to free the memory
        if(deleteSet) {
            for (int i=0; i<imgSet.getNumberOfImages(); ++i) {
                delete[] imgSet.getPixelData(i);
            }
            deleteSet = false;
        }
    }
}
// Worker loop of the receive thread: receives image sets from imgTrans,
// copies their pixel data into the receivedData ring (so the buffers stay
// valid after the underlying transfer reuses its own memory), and publishes
// each set for collectReceivedImageSet().
void AsyncTransfer::Pimpl::receiveLoop() {
    {
        // Delay the thread start until collectReceivedImageSet() releases
        // the mutex
        unique_lock<timed_mutex> lock(receiveMutex);
    }

    try {
        ImageSet currentSet;
        int bufferIndex = 0;

        while(!terminate) {
            // Receive new image
            if(!imgTrans.receiveImageSet(currentSet)) {
                // No image available
                continue;
            }

            // Copy the pixel data into our own ring buffers, compacting to
            // a tight row stride (width * bytesPerPixel) in the process.
            for(int i = 0;i<currentSet.getNumberOfImages();i++) {
                int bytesPerPixel = currentSet.getBytesPerPixel(i);
                int newStride = currentSet.getWidth() * bytesPerPixel;
                int totalSize = currentSet.getHeight() * newStride;
                if(static_cast<int>(receivedData[i + bufferIndex].size()) < totalSize) {
                    receivedData[i + bufferIndex].resize(totalSize);
                }
                if(newStride == currentSet.getRowStride(i)) {
                    // Already tight: one bulk copy
                    memcpy(&receivedData[i + bufferIndex][0], currentSet.getPixelData(i),
                        newStride*currentSet.getHeight());
                } else {
                    // Copy row by row, dropping the per-row padding
                    for(int y = 0; y<currentSet.getHeight(); y++) {
                        memcpy(&receivedData[i + bufferIndex][y*newStride],
                            &currentSet.getPixelData(i)[y*currentSet.getRowStride(i)],
                            newStride);
                    }
                    currentSet.setRowStride(i, newStride);
                }
                currentSet.setPixelData(i, &receivedData[i + bufferIndex][0]);
            }

            {
                unique_lock<timed_mutex> lock(receiveMutex);
                // Wait for previously received data to be processed
                while(newDataReceived) {
                    receiveWaitCond.wait_for(lock, std::chrono::milliseconds(100));
                    if(terminate) {
                        return;
                    }
                }

                // Notify that a new image set has been received
                newDataReceived = true;
                receivedSet = currentSet;
                receiveCond.notify_one();
            }

            // Increment index for data buffers so the set just published is
            // not overwritten while the consumer still reads it
            bufferIndex = (bufferIndex + currentSet.getNumberOfImages()) % NUM_BUFFERS;
        }
    } catch(...) {
        // Store the exception for later; it is rethrown to the caller in
        // collectReceivedImageSet().
        if(!receiveException) {
            receiveException = std::current_exception();
        }
        receiveCond.notify_all();
    }
}
bool AsyncTransfer::Pimpl::isConnected() const {
return imgTrans.isConnected();
}
void AsyncTransfer::Pimpl::disconnect() {
imgTrans.disconnect();
}
std::string AsyncTransfer::Pimpl::getRemoteAddress() const {
return imgTrans.getRemoteAddress();
}
int AsyncTransfer::Pimpl::getNumDroppedFrames() const {
return imgTrans.getNumDroppedFrames();
}
// Forwards the non-blocking accept attempt to the underlying ImageTransfer
bool AsyncTransfer::Pimpl::tryAccept() {
    return imgTrans.tryAccept();
}
// Out-of-class definitions for the static constexpr members. Required when
// the members are ODR-used prior to C++17 (where constexpr static data
// members are not implicitly inline).
constexpr int AsyncTransfer::Pimpl::NUM_BUFFERS;
constexpr int AsyncTransfer::Pimpl::SEND_THREAD_SHORT_WAIT_MS;
constexpr int AsyncTransfer::Pimpl::SEND_THREAD_LONG_WAIT_MS;
} // namespace

View file

@ -0,0 +1,166 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_ASYNCTRANSFER_H
#define VISIONTRANSFER_ASYNCTRANSFER_H
#include "visiontransfer/common.h"
#include "visiontransfer/imagetransfer.h"
#include "visiontransfer/imageset.h"
#include "visiontransfer/imageprotocol.h"
#include "visiontransfer/deviceinfo.h"
namespace visiontransfer {
/**
* \brief Class for asynchronous transfer of image sets.
*
* This class opens a network socket for delivering or receiving image sets. All
* operations are performed asynchronously, which means that they do not block.
* The class encapsulates ImageTransfer.
*/
class VT_EXPORT AsyncTransfer {
public:
    /**
     * \brief Creates a new transfer object.
     *
     * \param address Address of the remote host to which a connection
     *        should be established. In server mode this can be a local
     *        interface address or NULL.
     * \param service The port number that should be used as string or
     *        as textual service name.
     * \param protType Specifies whether the UDP or TCP transport protocol
     *        shall be used.
     * \param server If set to true, this object will be a communication server.
     * \param bufferSize Buffer size for sending / receiving network data.
     * \param maxUdpPacketSize Maximum allowed size of a UDP packet when sending data.
     *
     * Please see ImageTransfer::ImageTransfer() for further details.
     */
    AsyncTransfer(const char* address, const char* service = "7681",
        ImageProtocol::ProtocolType protType = ImageProtocol::PROTOCOL_UDP,
        bool server = false, int bufferSize = 1048576, int maxUdpPacketSize = 1472);

    /**
     * \brief Creates a new transfer object by using the device information
     * from device enumeration
     *
     * \param device Information on the device to which a connection should
     *        be established.
     * \param bufferSize Buffer size for sending / receiving network data.
     * \param maxUdpPacketSize Maximum allowed size of a UDP packet when sending data.
     */
    AsyncTransfer(const DeviceInfo& device, int bufferSize = 1048576, int maxUdpPacketSize = 1472);

    ~AsyncTransfer();

    /**
     * \brief Starts an asynchronous transmission of the given image set
     *
     * \param imageSet The image set that shall be transmitted.
     * \param deleteData If set to true, the pointers to the pixel data that
     *        are contained in \c imageSet, will be deleted after the
     *        image set has been transmitted.
     *
     * If deleteData is set to false, the pixel data contained in \c imageSet
     * must not be freed before the data has been transmitted. As transmission
     * happens asynchronously, it is recommended to let AsyncTransfer delete
     * the data pointers.
     */
    void sendImageSetAsync(const ImageSet& imageSet, bool deleteData = false);

#ifndef DOXYGEN_SHOULD_SKIP_THIS
    // Backwards-compatible alias kept for older client code
    DEPRECATED("Use sendImageSetAsync() instead")
    inline void sendImagePairAsync(const ImageSet& imageSet, bool deleteData = false) {
        sendImageSetAsync(imageSet, deleteData);
    }
#endif

    /**
     * \brief Collects the asynchronously received image.
     *
     * \param imageSet The received image set.
     * \param timeout The maximum time in seconds for which to wait if no
     *        image set has been received yet.
     * \return True if an image set has been received before the timeout.
     *
     * If no image set has been received, this method might block or return false.
     * Otherwise the returned image set is valid until the next call.
     *
     * If timeout is set to a value < 0, the function will block indefinitely.
     * If timeout = 0, the function will return immediately, and if timeout is > 0 then
     * the function will block for the given amount of time in seconds. The received
     * image set is only valid until the next call of collectReceivedImageSet().
     */
    bool collectReceivedImageSet(ImageSet& imageSet, double timeout = -1);

#ifndef DOXYGEN_SHOULD_SKIP_THIS
    // Backwards-compatible alias kept for older client code
    DEPRECATED("Use collectReceivedImageSet() instead")
    inline bool collectReceivedImagePair(ImageSet& imageSet, double timeout = -1) {
        return collectReceivedImageSet(imageSet, timeout);
    }
#endif

    /**
     * \brief Returns the number of frames that have been dropped since
     * connecting to the current remote host.
     *
     * Dropped frames are caused by dropped packets due to a poor network
     * connection
     */
    int getNumDroppedFrames() const;

    /**
     * \brief Tries to accept a client connection.
     *
     * \return True if a client has connected..
     *
     * This method can only be used in TCP server mode. It shall be called in
     * regular intervals to allow for client connections. The method is
     * non-blocking.
     */
    bool tryAccept();

    /**
     * \brief Returns true if a remote connection is established
     */
    bool isConnected() const;

    /**
     * \brief Terminates the current connection.
     *
     * If connected to a remote host this connection will be closed.
     */
    void disconnect();

    /**
     * \brief Returns the address of the remote host
     *
     * \return Remote address or "" if no connection has been established.
     */
    std::string getRemoteAddress() const;

private:
    // We follow the pimpl idiom
    class Pimpl;
    Pimpl* pimpl;

    // This class cannot be copied (declared but intentionally not defined)
    AsyncTransfer(const AsyncTransfer& other);
    AsyncTransfer& operator=(const AsyncTransfer&);
};
} // namespace
#endif

View file

@ -0,0 +1,303 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include "visiontransfer/bitconversions.h"
#include "visiontransfer/exceptions.h"
// SIMD Headers
#ifdef __AVX2__
# include <immintrin.h>
#elif __SSE4_1__
# include <smmintrin.h>
#elif __SSE2__
# include <emmintrin.h>
#endif
#ifdef __ARM_NEON
#include <arm_neon.h>
#endif
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
// Decodes 12-bit packed pixel data into 16-bit pixels, dispatching to an
// SSE4 or NEON implementation when compiled in and the width/alignment
// requirements are met, and to the scalar fallback otherwise.
// Note: the "} else" before each "# endif" intentionally chains into the
// following if-statement or fallback block — do not reformat.
void BitConversions::decode12BitPacked(int startRow, int stopRow, const unsigned char* src,
        unsigned char* dst, int srcStride, int dstStride, int rowWidth) {
    const unsigned char* dispStart = src;
# ifdef __SSE4_1__
    if(rowWidth % 32 == 0) {
        // Aligned loads require a 16-byte aligned source pointer and stride
        if(srcStride % 16 == 0 && reinterpret_cast<size_t>(src) % 16 == 0) {
            decode12BitPackedSSE4<true>(startRow, stopRow, dispStart,
                rowWidth, reinterpret_cast<unsigned short*>(dst), srcStride, dstStride);
        } else {
            decode12BitPackedSSE4<false>(startRow, stopRow, dispStart,
                rowWidth, reinterpret_cast<unsigned short*>(dst), srcStride, dstStride);
        }
    } else // We use fallback implementation if the image width is not dividable by 32
# endif
# if defined(__ARM_NEON) && defined(__ARM_ARCH_ISA_A64)
    if(rowWidth % 32 == 0) {
        // Aligned loads require a 16-byte aligned source pointer and stride
        if(srcStride % 16 == 0 && reinterpret_cast<size_t>(src) % 16 == 0) {
            decode12BitPackedNEON<true>(startRow, stopRow, dispStart,
                rowWidth, reinterpret_cast<unsigned short*>(dst), srcStride, dstStride);
        } else {
            decode12BitPackedNEON<false>(startRow, stopRow, dispStart,
                rowWidth, reinterpret_cast<unsigned short*>(dst), srcStride, dstStride);
        }
    } else // We use fallback implementation if the image width is not dividable by 32
# endif
    {
        decode12BitPackedFallback(startRow, stopRow, dispStart, rowWidth,
            reinterpret_cast<unsigned short*>(dst), srcStride, dstStride);
    }
}
#ifdef __SSE4_1__
// SSE4.1 implementation of the 12-bit packed decoder. Processes 32 pixels
// (48 input bytes, 64 output bytes) per loop iteration. alignedLoad selects
// between aligned (_mm_load_si128) and unaligned (_mm_loadu_si128) loads.
template <bool alignedLoad>
void BitConversions::decode12BitPackedSSE4(int startRow, int stopRow, const unsigned char* dispStart,
        int width, unsigned short* dst, int srcStride, int dstStride) {
    if(width % 32 != 0) {
        throw ProtocolException("Image width must be a multiple of 32!");
    }
    // SSE optimized code
    unsigned char* outPos = &reinterpret_cast<unsigned char*>(dst)[startRow*dstStride];
    int outRowPadding = dstStride - 2*width;
    constexpr char ff = (char)0xff; // to prevent warnings
    // Shuffle masks duplicate the shared middle bytes of each 3-byte pixel
    // pair; ff entries produce zero bytes in the shuffle result
    const __m128i shuffleMask1a = _mm_set_epi8(11, 10, 10, 9, 8, 7, 7, 6, 5, 4, 4, 3, 2, 1, 1, 0);
    const __m128i shuffleMask1b = _mm_set_epi8(ff, ff, ff, ff, ff, ff, ff, ff, ff, ff, ff, 15, 14, 13, 13, 12);
    const __m128i shuffleMask2a = _mm_set_epi8(7, 6, 6, 5, 4, 3, 3, 2, 1, 0, 0, ff, ff, ff, ff, ff);
    const __m128i shuffleMask2b = _mm_set_epi8(ff, ff, ff, ff, ff, 15, 15, 14, 13, 12, 12, 11, 10, 9, 9, 8);
    const __m128i shuffleMask3a = _mm_set_epi8(3, 2, 2, 1, 0, ff, ff, ff, ff, ff, ff, ff, ff, ff, ff, ff);
    const __m128i shuffleMask3b = _mm_set_epi8(15, 14, 14, 13, 12, 11, 11, 10, 9, 8, 8, 7, 6, 5, 5, 4);
    // Multiplying by 16 shifts every other 16-bit lane left by 4 bits
    const __m128i shiftMultiplyMask = _mm_set_epi16(1, 16, 1, 16, 1, 16, 1, 16);
    const __m128i blendMask1 = _mm_set_epi8(ff, ff, ff, ff, ff, ff, ff, ff, ff, ff, ff, 0, 0, 0, 0, 0);
    const __m128i blendMask2 = _mm_set_epi8(ff, ff, ff, ff, ff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    int dispRowWidth = width * 3/2; // packed bytes per row: 1.5 bytes per pixel
    for(int y = startRow; y<stopRow; y++) {
        const unsigned char* rowPos = &dispStart[y*srcStride];
        const unsigned char* rowEnd = &dispStart[y*srcStride + dispRowWidth];
        while(rowPos < rowEnd) {
            // Load 16 pixels
            // AA BA BB CC DC DD EE FE FF ...
            __m128i rowPixels1, rowPixels2, rowPixels3;
            if(alignedLoad) {
                rowPixels1 = _mm_load_si128(reinterpret_cast<const __m128i*>(rowPos));
                rowPos += 16;
                rowPixels2 = _mm_load_si128(reinterpret_cast<const __m128i*>(rowPos));
                rowPos += 16;
                rowPixels3 = _mm_load_si128(reinterpret_cast<const __m128i*>(rowPos));
                rowPos += 16;
            } else {
                rowPixels1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(rowPos));
                rowPos += 16;
                rowPixels2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(rowPos));
                rowPos += 16;
                rowPixels3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(rowPos));
                rowPos += 16;
            }
            // Duplicate bytes with shared data
            // BAAA BBBA DCCC DDDC FEEE FFFE (example without endianess swap!)
            __m128i part1 = _mm_shuffle_epi8(rowPixels1, shuffleMask1a);
            __m128i part2a = _mm_shuffle_epi8(rowPixels1, shuffleMask1b);
            __m128i part2b = _mm_shuffle_epi8(rowPixels2, shuffleMask2a);
            __m128i part3a = _mm_shuffle_epi8(rowPixels2, shuffleMask2b);
            __m128i part3b = _mm_shuffle_epi8(rowPixels3, shuffleMask3a);
            __m128i part4 = _mm_shuffle_epi8(rowPixels3, shuffleMask3b);
            // Merge halves that straddle two source registers
            __m128i part2 = _mm_blendv_epi8(part2a, part2b, blendMask1);
            __m128i part3 = _mm_blendv_epi8(part3a, part3b, blendMask2);
            // Shift left through multiplication
            // AAA0 BBBA CCC0 DDDC EEE0 FFFE
            __m128i shift1a = _mm_mullo_epi16(part1, shiftMultiplyMask);
            __m128i shift2a = _mm_mullo_epi16(part2, shiftMultiplyMask);
            __m128i shift3a = _mm_mullo_epi16(part3, shiftMultiplyMask);
            __m128i shift4a = _mm_mullo_epi16(part4, shiftMultiplyMask);
            // Shift right again
            // 0AAA 0BBB 0CCC 0DDD 0EEE 0FFF ...
            __m128i shift1b = _mm_srli_epi16(shift1a, 4);
            __m128i shift2b = _mm_srli_epi16(shift2a, 4);
            __m128i shift3b = _mm_srli_epi16(shift3a, 4);
            __m128i shift4b = _mm_srli_epi16(shift4a, 4);
            _mm_storeu_si128(reinterpret_cast<__m128i*>(outPos), shift1b);
            outPos += 16;
            _mm_storeu_si128(reinterpret_cast<__m128i*>(outPos), shift2b);
            outPos += 16;
            _mm_storeu_si128(reinterpret_cast<__m128i*>(outPos), shift3b);
            outPos += 16;
            _mm_storeu_si128(reinterpret_cast<__m128i*>(outPos), shift4b);
            outPos += 16;
        }
        outPos += outRowPadding;
    }
}
#endif
#if defined(__ARM_NEON) && defined(__ARM_ARCH_ISA_A64)
// Maps a logical byte index (y = source register 0..2, x = byte 0..15) to the
// corresponding table index after vld3q_u8's 3-way de-interleaving load
#define TX(y,x) ((x + y*16)/3 + ((x + y*16)%3)*16)
// ARM NEON (AArch64) implementation of the 12-bit packed decoder. Processes
// 32 pixels (48 input bytes) per loop iteration via vqtbl3q_u8 table lookups.
template <bool alignedLoad>
void BitConversions::decode12BitPackedNEON(int startRow, int stopRow, const unsigned char* dispStart,
        int width, unsigned short* dst, int srcStride, int dstStride) {
    if(width % 32 != 0) {
        throw ProtocolException("Image width must be a multiple of 32!");
    }
    // ARM NEON A64 optimized code
    unsigned char* outPos = &reinterpret_cast<unsigned char*>(dst)[startRow*dstStride];
    int outRowPadding = dstStride - 2*width;
    // Shuffle mask already performs endianess swapping
    const uint8x16_t shuffleMask1 = {TX(0,0), TX(0,1), TX(0,1), TX(0,2), TX(0,3), TX(0,4),
        TX(0,4), TX(0,5), TX(0,6), TX(0,7), TX(0,7), TX(0,8), TX(0,9), TX(0,10), TX(0,10), TX(0,11)};
    const uint8x16_t shuffleMask2 = {TX(0,12), TX(0,13), TX(0,13), TX(0,14), TX(0,15), TX(1,0),
        TX(1,0), TX(1,1), TX(1,2), TX(1,3), TX(1,3), TX(1,4), TX(1,5), TX(1,6), TX(1,6), TX(1,7)};
    const uint8x16_t shuffleMask3 = {TX(1,8), TX(1,9), TX(1,9), TX(1,10), TX(1,11), TX(1,12),
        TX(1,12), TX(1,13), TX(1,14), TX(1,15), TX(1,15), TX(2,0), TX(2,1), TX(2,2), TX(2,2), TX(2,3)};
    const uint8x16_t shuffleMask4 = {TX(2,4), TX(2,5), TX(2,5), TX(2,6), TX(2,7), TX(2,8),
        TX(2,8), TX(2,9), TX(2,10), TX(2,11), TX(2,11), TX(2,12), TX(2,13), TX(2,14), TX(2,14), TX(2,15)};
    // Alternating left-shift amounts for the 16-bit lanes (vshlq_u16)
    const int16x8_t shiftMask = {4, 0, 4, 0, 4, 0, 4, 0};
    int dispRowWidth = width * 3/2; // packed bytes per row: 1.5 bytes per pixel
    for(int y = startRow; y<stopRow; y++) {
        const unsigned char* rowPos = &dispStart[y*srcStride];
        const unsigned char* rowEnd = &dispStart[y*srcStride + dispRowWidth];
        while(rowPos < rowEnd) {
            // Load 16 pixels
            // AA BA BB CC DC DD EE FE FF
            uint8x16x3_t rowPixels;
            if(alignedLoad) {
                rowPixels = vld3q_u8(reinterpret_cast<const uint8_t*>(
                    __builtin_assume_aligned(rowPos, 16)));
            } else {
                rowPixels = vld3q_u8(reinterpret_cast<const uint8_t*>(rowPos));
            }
            rowPos += 48;
            // Duplicate bytes with shared data
            // BAAA BBBA DCCC DDDC FEEE FFFE (example without endianess swap!)
            uint8x16_t part1 = vqtbl3q_u8(rowPixels, shuffleMask1);
            uint8x16_t part2 = vqtbl3q_u8(rowPixels, shuffleMask2);
            uint8x16_t part3 = vqtbl3q_u8(rowPixels, shuffleMask3);
            uint8x16_t part4 = vqtbl3q_u8(rowPixels, shuffleMask4);
            // Shift left
            // AAA0 BBBA CCC0 DDDC EEE0 FFFE
            uint16x8_t shift1a = vshlq_u16(vreinterpretq_u16_u8(part1), shiftMask);
            uint16x8_t shift2a = vshlq_u16(vreinterpretq_u16_u8(part2), shiftMask);
            uint16x8_t shift3a = vshlq_u16(vreinterpretq_u16_u8(part3), shiftMask);
            uint16x8_t shift4a = vshlq_u16(vreinterpretq_u16_u8(part4), shiftMask);
            // Shift right again
            // 0AAA 0BBB 0CCC 0DDD 0EEE 0FFF ...
            uint16x8_t shift1b = vshrq_n_u16(shift1a, 4);
            uint16x8_t shift2b = vshrq_n_u16(shift2a, 4);
            uint16x8_t shift3b = vshrq_n_u16(shift3a, 4);
            uint16x8_t shift4b = vshrq_n_u16(shift4a, 4);
            vst1q_u16(reinterpret_cast<uint16_t*>(outPos), shift1b);
            outPos += 16;
            vst1q_u16(reinterpret_cast<uint16_t*>(outPos), shift2b);
            outPos += 16;
            vst1q_u16(reinterpret_cast<uint16_t*>(outPos), shift3b);
            outPos += 16;
            vst1q_u16(reinterpret_cast<uint16_t*>(outPos), shift4b);
            outPos += 16;
        }
        outPos += outRowPadding;
    }
}
#endif
// Portable scalar decoder for 12-bit packed pixel data. Every 3 input bytes
// hold two 12-bit pixels: pixel0 = b0 | (b1 & 0x0f) << 8, pixel1 = b1 >> 4 | b2 << 4.
//
// \param startRow First row to decode (inclusive)
// \param stopRow  Last row to decode (exclusive)
// \param dispStart Start of the packed source data
// \param width    Number of pixels per row (expected to be even)
// \param dst      Destination buffer of 16-bit pixels
// \param srcStride Source row stride in bytes
// \param dstStride Destination row stride in bytes
//
// Fix: the previous loop compared "dstPtr != dstEndPtr" while advancing two
// pixels per iteration; an odd width would step past the end pointer and
// overrun the buffers. The bounded pair loop below is identical for valid
// (even) widths and simply ignores a trailing odd pixel.
void BitConversions::decode12BitPackedFallback(int startRow, int stopRow, const unsigned char* dispStart,
        int width, unsigned short* dst, int srcStride, int dstStride) {
    int dstStrideShort = dstStride/2;
    // Non-SSE version
    for(int y = startRow; y < stopRow; y++) {
        const unsigned char* srcPtr = &dispStart[y*srcStride];
        unsigned short* dstPtr = &dst[y*dstStrideShort];
        // Two output pixels are produced from every three input bytes
        for(int x = 0; x < width - 1; x += 2) {
            unsigned char b0 = srcPtr[0];
            unsigned char b1 = srcPtr[1];
            unsigned char b2 = srcPtr[2];
            dstPtr[x] = static_cast<unsigned short>(b0)
                | (static_cast<unsigned short>(b1 & 0x0f) << 8);
            dstPtr[x + 1] = (static_cast<unsigned short>(b1) >> 4)
                | (static_cast<unsigned short>(b2) << 4);
            srcPtr += 3;
        }
    }
}
// Scalar encoder producing 12-bit packed data from 16-bit pixels (only the
// low 12 bits of each pixel are kept). Two pixels are packed into 3 bytes:
// byte0 = low 8 bits of pixel A, byte1 = high nibble of A | low nibble of B,
// byte2 = bits 4..11 of pixel B.
//
// \param startRow First row to encode (inclusive)
// \param stopRow  Last row to encode (exclusive)
// \param src      Source buffer of 16-bit pixels (as bytes)
// \param dst      Destination buffer for the packed data
// \param srcStride Source row stride in bytes
// \param dstStride Destination row stride in bytes
// \param rowWidth Number of pixels per row (expected to be even)
//
// Fix: the previous loop compared "srcPtr != srcEndPtr" while advancing two
// pixels per iteration; an odd rowWidth would step past the end pointer and
// overrun the buffers. The bounded pair loop below is identical for valid
// (even) widths and simply ignores a trailing odd pixel.
void BitConversions::encode12BitPacked(int startRow, int stopRow, const unsigned char* src,
        unsigned char* dst, int srcStride, int dstStride, int rowWidth) {
    const unsigned short* srcShort = reinterpret_cast<const unsigned short*>(src);
    int srcStrideShort = srcStride/2;
    // SSE/NEON optimization is not yet available
    for(int y = startRow; y < stopRow; y++) {
        const unsigned short* srcPtr = &srcShort[y*srcStrideShort];
        unsigned char* dstPtr = &dst[y*dstStride];
        // Three output bytes are produced from every two input pixels
        for(int x = 0; x < rowWidth - 1; x += 2) {
            unsigned short pixA = srcPtr[x];
            unsigned short pixB = srcPtr[x + 1];
            dstPtr[0] = static_cast<unsigned char>(pixA);
            dstPtr[1] = (static_cast<unsigned char>(pixA >> 8) & 0x0f)
                | (static_cast<unsigned char>(pixB) << 4);
            dstPtr[2] = static_cast<unsigned char>(pixB >> 4);
            dstPtr += 3;
        }
    }
}
}} // namespace

View file

@ -0,0 +1,48 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_BITCONVERSIONS_H
#define VISIONTRANSFER_BITCONVERSIONS_H
namespace visiontransfer {
namespace internal {
/**
* \brief Various implementations for converting from 12-bit to
* 16-bit per pixels formats.
*/
class BitConversions {
public:
    // Decodes 12-bit packed data into 16-bit pixels, dispatching to the
    // fastest available implementation (SSE4 / NEON / scalar fallback)
    static void decode12BitPacked(int startRow, int stopRow, const unsigned char* src,
        unsigned char* dst, int srcStride, int dstStride, int rowWidth);
    // Encodes 16-bit pixels (12 significant bits) into the packed format
    static void encode12BitPacked(int startRow, int stopRow, const unsigned char* src,
        unsigned char* dst, int srcStride, int dstStride, int rowWidth);
private:
    // SSE4.1 variant; alignedLoad selects aligned vs. unaligned 128-bit loads
    template <bool alignedLoad>
    static void decode12BitPackedSSE4(int startRow, int stopRow, const unsigned char* dispStart,
        int width, unsigned short* dst, int srcStride, int dstStride);
    // ARM NEON (AArch64) variant; alignedLoad selects aligned vs. unaligned loads
    template <bool alignedLoad>
    static void decode12BitPackedNEON(int startRow, int stopRow, const unsigned char* dispStart,
        int width, unsigned short* dst, int srcStride, int dstStride);
    // Portable scalar fallback used when no SIMD variant is applicable
    static void decode12BitPackedFallback(int startRow, int stopRow, const unsigned char* dispStart,
        int width, unsigned short* dst, int srcStride, int dstStride);
};
}} // namespace
#endif

View file

@ -0,0 +1,41 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_COMMON_H
#define VISIONTRANSFER_COMMON_H

#ifdef _MSC_VER
// Visual studio needs an explicit export statement
#   define VT_EXPORT __declspec(dllexport)
#else
// On other toolchains symbols are exported by default
#   define VT_EXPORT
#endif

// Macro for marking functions / variables as deprecated
#ifdef VISIONTRANSFER_NO_DEPRECATION_WARNINGS
// For referencing all of our own code (e.g. for Python wrapper)
#   define DEPRECATED(msg)
#else
#   if __cplusplus >= 201402L
        // Standard attribute, available since C++14
#       define DEPRECATED(msg) [[deprecated(msg)]]
#   elif defined(__GNUC__) || defined(__clang__)
#       define DEPRECATED(msg) __attribute__ ((deprecated(msg)))
#   elif defined(_MSC_VER)
#       define DEPRECATED(msg) __declspec(deprecated(msg))
#   else
        // Unknown compiler: deprecation markers become no-ops
#       define DEPRECATED(msg)
#   endif
#endif

#endif

View file

@ -0,0 +1,884 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <algorithm>
#include <iostream>
#include <cstring>
#include <iomanip>
#include <sstream>
#include "visiontransfer/datablockprotocol.h"
#include "visiontransfer/exceptions.h"
// Network headers
#ifdef _WIN32
#include <winsock2.h>
#undef min
#undef max
#else
#include <arpa/inet.h>
#endif
#define LOG_ERROR(expr)
//#define LOG_ERROR(expr) std::cerr << "DataBlockProtocol: " << expr << std::endl
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
// Constructs the protocol state machine for either TCP or UDP transport.
// The long initializer list puts every transfer/reception bookkeeping field
// into its idle state before the buffers are sized.
DataBlockProtocol::DataBlockProtocol(bool server, ProtocolType protType, int maxUdpPacketSize)
        : isServer(server), protType(protType),
        transferDone(true),
        overwrittenTransferData{0},
        overwrittenTransferIndex{-1},
        overwrittenTransferBlock{-1},
        transferHeaderData{nullptr},
        transferHeaderSize{0},
        totalBytesCompleted{0}, totalTransferSize{0},
        waitingForMissingSegments(false),
        totalReceiveSize(0), connectionConfirmed(false),
        confirmationMessagePending(false), eofMessagePending(false),
        clientConnectionPending(false), resendMessagePending(false),
        lastRemoteHostActivity(), lastSentHeartbeat(),
        lastReceivedHeartbeat(std::chrono::steady_clock::now()),
        finishedReception(false), droppedReceptions(0),
        completedReceptions(0), lostSegmentRate(0.0), lostSegmentBytes(0),
        unprocessedMsgLength(0), headerReceived(false) {
    // Determine the maximum allowed payload size
    if(protType == PROTOCOL_TCP) {
        // TCP: payload is bounded by the transfer size minus the segment header
        maxPayloadSize = MAX_TCP_BYTES_TRANSFER - sizeof(SegmentHeaderTCP);
        minPayloadSize = 0;
    } else {
        // UDP: every datagram except the last must be completely filled
        maxPayloadSize = maxUdpPacketSize - sizeof(SegmentHeaderUDP);
        minPayloadSize = maxPayloadSize;
    }
    zeroStructures();
    resizeReceiveBuffer();
    resetReception(false);
}
// Decodes a raw 32-bit offset word into its data block ID (bits 28..30)
// and the 28-bit segment offset. Bit 31 is reserved and ignored here.
void DataBlockProtocol::splitRawOffset(int rawSegmentOffset, int& dataBlockID, int& segmentOffset) {
    const int topNibble = (rawSegmentOffset >> 28) & 0xf;
    // The 0x8 bit of the nibble is reserved for future use
    dataBlockID = topNibble & 0x7;
    segmentOffset = rawSegmentOffset & 0x0FFFffff;
}
// Inverse of splitRawOffset(): packs the reserved bit, block ID and
// segment offset into a single 32-bit word.
int DataBlockProtocol::mergeRawOffset(int dataBlockID, int segmentOffset, int reserved_defaults0) {
    const int reservedBit = (reserved_defaults0 & 1) << 31;
    const int blockBits = (dataBlockID & 0x07) << 28;
    const int offsetBits = segmentOffset & 0x0FFFffff;
    return reservedBit | blockBits | offsetBits;
}
// Resets all per-block bookkeeping arrays and the shared transfer /
// reception counters to their initial values.
void DataBlockProtocol::zeroStructures() {
    for (int block = 0; block < MAX_DATA_BLOCKS; ++block) {
        rawDataArr[block] = nullptr;
        rawDataArrStrideHackOrig[block] = 0;
        rawDataArrStrideHackRepl[block] = 0;
        rawValidBytes[block] = 0;
        transferOffset[block] = 0;
        transferSize[block] = 0;
    }
    std::memset(overwrittenTransferData, 0, sizeof(overwrittenTransferData));
    lastTransmittedBlock = -1;
    overwrittenTransferIndex = -1;
    overwrittenTransferBlock = -1;
    numReceptionBlocks = 0;
    receiveOffset = 0;
}
// Puts the sender side back into its idle state, discarding any
// transmission that might still be in progress.
void DataBlockProtocol::resetTransfer() {
    transferDone = true;
    totalBytesCompleted = 0;
    totalTransferSize = 0;
    numTransferBlocks = 0;
    overwrittenTransferIndex = -1;
    overwrittenTransferBlock = -1;
    missingTransferSegments.clear();
}
void DataBlockProtocol::setTransferBytes(int block, long bytes) {
if (transferHeaderData == nullptr) {
throw ProtocolException("Tried to set data block size before initializing header!");
} else if (block >= numTransferBlocks) {
throw ProtocolException("Request to set data block size - block index too high!");
}
transferSize[block] = bytes;
HeaderPreamble* hp = reinterpret_cast<HeaderPreamble*>(transferHeaderData);
hp->netTransferSizes[block] = htonl(bytes);
}
// Installs the user-supplied transfer header and prepends the protocol's
// own preamble in-place.
// NOTE(review): data[-headerBaseOffset] indexes *before* the supplied
// pointer — this assumes the caller reserved sizeof(HeaderPreamble) bytes
// of writable memory in front of `data`; verify against all callers.
void DataBlockProtocol::setTransferHeader(unsigned char* data, int headerSize, int blocks) {
    if(!transferDone && numTransferBlocks > 0) {
        throw ProtocolException("Header data set while transfer is active!");
    } else if(headerSize + 9 > static_cast<int>(sizeof(controlMessageBuffer))) {
        throw ProtocolException("Transfer header is too large!");
    }
    numTransferBlocks = blocks;
    transferDone = false;
    for (int i=0; i<MAX_DATA_BLOCKS; ++i) {
        this->transferSize[i] = 0; // must be set via setRawTransferBytes()
    }
    int headerBaseOffset = sizeof(HeaderPreamble);
    // Step back so the preamble occupies the bytes directly before `data`
    transferHeaderData = &data[-headerBaseOffset];
    HeaderPreamble* ourHeader = reinterpret_cast<HeaderPreamble*>(transferHeaderData);
    unsigned short netHeaderSize = htons(static_cast<unsigned short>(headerSize));
    ourHeader->netHeaderSize = netHeaderSize;
    ourHeader->netTransferSizeDummy = htonl(-1); // clashes on purpose with old recipients
    headerSize += headerBaseOffset;
    if(protType == PROTOCOL_UDP) {
        // In UDP mode we still need to make this a control message
        transferHeaderData[headerSize++] = HEADER_MESSAGE;
        transferHeaderData[headerSize++] = 0xFF;
        transferHeaderData[headerSize++] = 0xFF;
        transferHeaderData[headerSize++] = 0xFF;
        transferHeaderData[headerSize++] = 0xFF;
    }
    transferHeaderSize = headerSize;
}
// Attaches the payload buffer for one data block and restarts its
// transfer bookkeeping. The header must have been set beforehand.
void DataBlockProtocol::setTransferData(int block, unsigned char* data, int validBytes) {
    if(transferHeaderSize == 0 || transferHeaderData == nullptr) {
        throw ProtocolException("The transfer header has not yet been set!");
    }

    transferDone = false;
    totalBytesCompleted = 0;
    overwrittenTransferIndex = -1;
    overwrittenTransferBlock = -1;

    rawDataArr[block] = data;
    transferOffset[block] = 0;
    // Never report more valid bytes than the configured block size
    rawValidBytes[block] = min(transferSize[block], validBytes);
}
// Updates how many bytes of a block's payload are ready for transmission.
// The value is clamped to the block size; anything smaller than one int
// is treated as no data at all.
void DataBlockProtocol::setTransferValidBytes(int block, int validBytes) {
    int clamped;
    if(validBytes >= transferSize[block]) {
        clamped = transferSize[block];
    } else if(validBytes < static_cast<int>(sizeof(int))) {
        clamped = 0;
    } else {
        clamped = validBytes;
    }
    rawValidBytes[block] = clamped;
}
// Produces a human-readable one-line summary of the per-block transfer
// state, intended for debugging output.
std::string DataBlockProtocol::statusReport() {
    std::stringstream report;
    report << "DataBlockProtocol, blocks=" << numTransferBlocks << ": ";
    for (int block = 0; block < numTransferBlocks; ++block) {
        report << block << ":(len " << transferSize[block]
               << " ofs " << transferOffset[block]
               << " rawvalid " << rawValidBytes[block] << ") ";
    }
    report << " total done: " << totalBytesCompleted << "/" << totalTransferSize;
    return report.str();
}
// Returns a pointer to the next network message to transmit (and its length),
// or nullptr if nothing is pending. For UDP a segment trailer is written
// in-place *behind* the payload; for TCP a segment header is written in-place
// *in front of* it. The clobbered bytes are saved in overwrittenTransferData
// and restored by restoreTransferBuffer() on the next call.
const unsigned char* DataBlockProtocol::getTransferMessage(int& length) {
    // NOTE(review): rawValidBytes is an array, so "rawValidBytes == 0"
    // compares a decayed (non-null) pointer against 0 and is always false;
    // the intended check was probably on an element of the array — verify.
    if(transferDone || rawValidBytes == 0) {
        // No more data to be transferred
        length = 0;
        return nullptr;
    }
    // For TCP we always send the header first
    if(protType == PROTOCOL_TCP && !anyPayloadReceived() && transferHeaderData != nullptr) {
        length = transferHeaderSize;
        const unsigned char* ret = transferHeaderData;
        transferHeaderData = nullptr;
        return ret;
    }
    // The transfer buffer might have been altered by the previous transfer
    // and first needs to be restored
    restoreTransferBuffer();
    // Determine which data segment to transfer next
    int block = -1, offset = -1;
    getNextTransferSegment(block, offset, length);
    if(length == 0) {
        return nullptr;
    }
    if(protType == PROTOCOL_UDP) {
        // For udp, we always append a segment offset
        overwrittenTransferBlock = block;
        overwrittenTransferIndex = offset + length;
        SegmentHeaderUDP* segmentHeader = reinterpret_cast<SegmentHeaderUDP*>(&rawDataArr[block][offset + length]);
        std::memcpy(overwrittenTransferData, segmentHeader, sizeof(SegmentHeaderUDP));
        segmentHeader->segmentOffset = static_cast<int>(htonl(mergeRawOffset(block, offset)));
        length += sizeof(SegmentHeaderUDP);
        lastTransmittedBlock = block;
        return &rawDataArr[block][offset];
    } else {
        // For tcp, we *PRE*pend the header consisting of segment offset plus the packet payload size
        int headerOffset = offset - sizeof(SegmentHeaderTCP);
        overwrittenTransferBlock = block;
        overwrittenTransferIndex = headerOffset;
        SegmentHeaderTCP* segmentHeader = reinterpret_cast<SegmentHeaderTCP*>(&rawDataArr[block][headerOffset]);
        std::memcpy(overwrittenTransferData, segmentHeader, sizeof(SegmentHeaderTCP));
        segmentHeader->fragmentSize = htons(length);
        segmentHeader->segmentOffset = static_cast<int>(htonl(mergeRawOffset(block, offset)));
        length += sizeof(SegmentHeaderTCP);
        lastTransmittedBlock = block;
        return &rawDataArr[block][headerOffset];
    }
}
// Selects the next segment to transmit. Retransmissions requested by the
// receiver (missingTransferSegments) take priority; otherwise the block
// with the most unsent valid data is chosen. Outputs block index, byte
// offset within that block, and segment length (0 if nothing to send).
void DataBlockProtocol::getNextTransferSegment(int& block, int& offset, int& length) {
    if(missingTransferSegments.size() == 0) {
        // Select from block with the most unsent data
        int sendBlock = 0, amount = 0;
        for (int i=0; i<numTransferBlocks; ++i) {
            int avail = std::min(transferSize[i], rawValidBytes[i]);
            avail -= transferOffset[i];
            if (avail > amount) {
                amount = avail;
                sendBlock = i;
            }
        }
        length = std::min(maxPayloadSize, amount);
        // In UDP mode (minPayloadSize > 0) partial packets are only allowed
        // for the final data of a block
        if(length == 0 || (length < minPayloadSize && rawValidBytes[sendBlock] != transferSize[sendBlock])) {
            length = 0;
            return;
        }
        block = sendBlock;
        offset = transferOffset[sendBlock];
        transferOffset[sendBlock] += length; // for next transfer
        if (protType == PROTOCOL_UDP) {
            // Once every block is fully scheduled, announce the end of transfer
            bool complete = true;
            for (int i=0; i<numTransferBlocks; ++i) {
                if (transferOffset[i] < transferSize[i]) {
                    complete = false;
                    break;
                }
            }
            if (complete) {
                eofMessagePending = true;
            }
        }
    } else {
        // This is a segment that is re-transmitted due to packet loss
        splitRawOffset(missingTransferSegments.front().first, block, offset);
        length = std::min(maxPayloadSize, missingTransferSegments.front().second);
        LOG_ERROR("Re-transmitting: " << offset << " - " << (offset + length));
        int remaining = missingTransferSegments[0].second - length;
        if(remaining == 0) {
            // The segment is competed
            missingTransferSegments.pop_front();
        } else {
            // The segment is only partially complete
            missingTransferSegments.front().first += length;
            missingTransferSegments.front().second = remaining;
        }
    }
}
// Undoes the in-place segment header/trailer write performed by
// getTransferMessage(), restoring the caller's payload bytes.
void DataBlockProtocol::restoreTransferBuffer() {
    if(overwrittenTransferBlock >= 0) {
        const size_t savedBytes = (protType == PROTOCOL_UDP)
            ? sizeof(SegmentHeaderUDP) : sizeof(SegmentHeaderTCP);
        std::memcpy(&rawDataArr[overwrittenTransferBlock][overwrittenTransferIndex],
            overwrittenTransferData, savedBytes);
    }
    overwrittenTransferIndex = -1;
    overwrittenTransferBlock = -1;
}
// Returns true once every block has been fully scheduled for transmission
// and no EOF control message is still waiting to be sent.
bool DataBlockProtocol::transferComplete() {
    bool allBlocksSent = true;
    for (int block = 0; block < numTransferBlocks; ++block) {
        if (transferOffset[block] < transferSize[block]) {
            allBlocksSent = false;
            break;
        }
    }
    return allBlocksSent && !eofMessagePending;
}
// Largest chunk of data that may be passed to the reception path in one
// call, depending on the transport protocol.
int DataBlockProtocol::getMaxReceptionSize() const {
    return (protType == PROTOCOL_TCP) ? MAX_TCP_BYTES_TRANSFER : MAX_UDP_RECEPTION;
}
unsigned char* DataBlockProtocol::getNextReceiveBuffer(int maxLength) {
if(receiveOffset + maxLength > receiveBuffer.size()) {
receiveBuffer.resize(receiveOffset + maxLength);
}
return &receiveBuffer[receiveOffset];
}
// Entry point for incoming network data: resets state after a finished
// frame, dispatches to the UDP or TCP handler, and reports whether a
// complete transfer has now been received.
void DataBlockProtocol::processReceivedMessage(int length, bool& transferComplete) {
    transferComplete = false;
    if(length <= 0) {
        return; // Nothing received
    }
    if(finishedReception) {
        // First reset for next frame
        resetReception(false);
    }
    if(protType == PROTOCOL_UDP) {
        processReceivedUdpMessage(length, transferComplete);
    } else {
        processReceivedTcpMessage(length, transferComplete);
    }
    // Whatever the handlers reported, the authoritative completion state
    // is the finishedReception flag they maintain
    transferComplete = finishedReception;
}
// Handles one received UDP message. A message is either a control
// packet (raw offset 0xFFFFFFFF) or a data segment whose trailing
// 32-bit value encodes the destination block and byte offset. In-order
// segments are copied into their block buffer; a gap triggers either a
// retransmission request or a full reception reset.
void DataBlockProtocol::processReceivedUdpMessage(int length, bool& transferComplete) {
    // Every UDP message carries at least the trailing 32-bit raw segment
    // offset and must fit inside the receive buffer.
    if(length < static_cast<int>(sizeof(int)) ||
            0 + length > static_cast<int>(receiveBuffer.size())) {
        throw ProtocolException("Received message size is invalid!");
    }

    // Extract the sequence number
    int rawSegmentOffset = ntohl(*reinterpret_cast<int*>(
        &receiveBuffer[0 + length - sizeof(int)]));

    // for holding the offset with blanked-out channel index
    int dataBlockID, segmentOffset;
    splitRawOffset(rawSegmentOffset, dataBlockID, segmentOffset);

    if(rawSegmentOffset == static_cast<int>(0xFFFFFFFF)) {
        // This is a control packet
        processControlMessage(length);
    } else if(headerReceived) {
        // Correct the length by subtracting the size of the segment offset
        int realPayloadOffset = 0;
        int payloadLength = length - sizeof(int);

        if(segmentOffset != blockReceiveOffsets[dataBlockID]) {
            // The segment offset doesn't match what we expected. Probably
            // a packet was dropped
            if(!waitingForMissingSegments && //receiveOffset > 0 &&
                    segmentOffset > blockReceiveOffsets[dataBlockID]
                    && segmentOffset + payloadLength < (int)blockReceiveBuffers[dataBlockID].size()) {
                // The gap lies ahead of us and the data still fits: we can
                // just ask for a retransmission of the dropped packet(s).
                LOG_ERROR("Missing segment: " << blockReceiveOffsets[dataBlockID] << " - " << segmentOffset
                    << " (" << missingReceiveSegments.size() << ")");

                // Record the missing range [expected offset, this segment)
                MissingReceiveSegment missingSeg;
                missingSeg.offset = mergeRawOffset(dataBlockID, blockReceiveOffsets[dataBlockID]);
                missingSeg.length = segmentOffset - blockReceiveOffsets[dataBlockID];
                missingSeg.isEof = false;
                lostSegmentBytes += missingSeg.length;
                missingReceiveSegments.push_back(missingSeg);

                // Move the received data to the right place in the buffer
                memcpy(&blockReceiveBuffers[dataBlockID][segmentOffset], &receiveBuffer[0 + realPayloadOffset], payloadLength);
                // Advance block receive offset
                blockReceiveOffsets[dataBlockID] = segmentOffset + payloadLength;
            } else {
                // In this case we cannot recover from the packet loss or
                // we just didn't get the EOF packet and everything is
                // actually fine
                resetReception(blockReceiveOffsets[0] > 0);
                if(segmentOffset > 0 ) {
                    if(blockReceiveOffsets[dataBlockID] > 0) {
                        LOG_ERROR("Resend failed!");
                    }
                    return;
                } else {
                    LOG_ERROR("Missed EOF message!");
                }
            }
        } else {
            // In-order segment: bounds-check before copying
            if ((realPayloadOffset+payloadLength) > (int)receiveBuffer.size()) {
                throw ProtocolException("Received out-of-bound data.");
            }

            // append to correct block buffer
            memcpy(&blockReceiveBuffers[dataBlockID][segmentOffset], &receiveBuffer[0 + realPayloadOffset], payloadLength);
            // advance the expected next data offset for this block
            blockReceiveOffsets[dataBlockID] = segmentOffset + payloadLength;
            if (waitingForMissingSegments) {
                // segment extends the currently valid region (suspended once we missed out first segment)
                if ((missingReceiveSegments.size() == 1) && (missingReceiveSegments.front().length <= payloadLength)) {
                    // last gap closed by this segment
                    blockValidSize[dataBlockID] = blockReceiveSize[dataBlockID];
                } else {
                    blockValidSize[dataBlockID] = segmentOffset + payloadLength;
                }
            } else if (missingReceiveSegments.size() == 0) {
                blockValidSize[dataBlockID] = segmentOffset + payloadLength;
            }
        }

        if(segmentOffset == 0 && dataBlockID == 0) {
            // This is the beginning of a new frame
            lastRemoteHostActivity = std::chrono::steady_clock::now();
        }

        // Try to fill missing regions
        integrateMissingUdpSegments(dataBlockID, segmentOffset, payloadLength);
    }
}
// Accounts for a just-received segment while in resend mode: resent
// packets must arrive exactly at the head of the missing-segment queue.
// Once the queue is drained the reception is complete; otherwise the
// expected offset jumps to the next missing range.
void DataBlockProtocol::integrateMissingUdpSegments(int block, int lastSegmentOffset, int lastSegmentSize) {
    if(waitingForMissingSegments) {
        // Things get more complicated when re-transmitting dropped packets
        int checkBlock, checkOffset;
        MissingReceiveSegment& firstSeg = missingReceiveSegments.front();
        splitRawOffset(firstSeg.offset, checkBlock, checkOffset);
        if(lastSegmentOffset != checkOffset) {
            // Resent data did not match the first outstanding gap; give up
            LOG_ERROR("Received invalid resend: " << lastSegmentOffset);
            resetReception(true);
        } else {
            // Shrink the head gap by the bytes that just arrived
            firstSeg.offset += lastSegmentSize;
            firstSeg.length -= lastSegmentSize;
            if(firstSeg.length == 0) {
                missingReceiveSegments.pop_front();
            }

            if(missingReceiveSegments.size() == 0) {
                // All gaps are filled; the frame is complete
                waitingForMissingSegments = false;
                finishedReception = true;
            } else {
                // Expect the next resend at the start of the next gap
                blockReceiveOffsets[block] = missingReceiveSegments.front().offset;
            }
        }
    }
}
// Handles one received TCP chunk. TCP is a byte stream, so received
// data may be split arbitrarily: the transfer header, segment headers
// and fragments are re-assembled across recv() calls, with any
// incomplete tail kept at the start of the receive buffer for the next
// call to append to.
void DataBlockProtocol::processReceivedTcpMessage(int length, bool& transferComplete) {
    // In TCP mode the header must be the first data item to be transmitted
    if(!headerReceived) {
        int totalHeaderSize = parseReceivedHeader(length, 0);
        if(totalHeaderSize == 0) {
            // Not yet enough data. Keep on buffering.
            receiveOffset += length; // append in next recv
            return;
        } else {
            // Header successfully parsed
            // Move the remaining data to the beginning of the buffer
            length -= totalHeaderSize;
            // The rest is the first [part of] buffer segment data

            if(length == 0) {
                return; // No more data remaining
            }

            int movelength = receiveOffset + length; // also move the old stuff
            ::memmove(&receiveBuffer[0], &receiveBuffer[totalHeaderSize], movelength);
            receiveOffset = movelength; // append in next recv
        }
    } else {
        receiveOffset += length; // modified below if complete chunks are present
    }

    if (legacyTransfer) {
        // Legacy TCP transfer: no segment headers, just raw data for block 0, up to the expected size
        int remainingSize = blockReceiveSize[0] - blockValidSize[0];
        int availableSize = std::min(receiveOffset, remainingSize);
        // Update actual target buffer
        std::memcpy(&blockReceiveBuffers[0][blockReceiveOffsets[0]], &receiveBuffer[0], availableSize);
        blockReceiveOffsets[0] += availableSize;
        blockValidSize[0] = blockReceiveOffsets[0];
        // Extra data, store at buffer start for next reception to append to
        if (receiveOffset <= remainingSize) {
            // Start next reception at recv buffer start
            receiveOffset = 0;
        } else {
            // Mark next reception to append to unhandled data remainder
            std::memmove(&receiveBuffer[0], &receiveBuffer[remainingSize], availableSize - remainingSize);
            receiveOffset = availableSize - remainingSize;
        }
    } else {
        // Parse the SegmentHeaderTCP (if present) to see if a full fragment is present
        int ofs = 0;
        while ((receiveOffset - ofs) >= (int) sizeof(SegmentHeaderTCP)) {
            SegmentHeaderTCP* header = reinterpret_cast<SegmentHeaderTCP*>(&receiveBuffer[ofs]);
            int fragsize = ntohs(header->fragmentSize);
            int rawSegmentOffset = ntohl(header->segmentOffset);
            int block, offset;
            splitRawOffset(rawSegmentOffset, block, offset);
            if (block == 7) { // Block 7 is reserved; control message (the next header), stop moving image data
                break;
            }
            if ((receiveOffset - ofs) >= (fragsize + (int) sizeof(SegmentHeaderTCP))) {
                // Incorporate fragment
                // assert here that offset==blockReceiveOffsets[block]
                if (offset != blockReceiveOffsets[block]) {
                    throw ProtocolException("Received invalid header!");
                }
                std::memcpy(&blockReceiveBuffers[block][blockReceiveOffsets[block]], &receiveBuffer[ofs+sizeof(SegmentHeaderTCP)], fragsize);
                blockReceiveOffsets[block] += fragsize;
                blockValidSize[block] = blockReceiveOffsets[block];
                // Advance to next potential chunk
                ofs += fragsize + sizeof(SegmentHeaderTCP);
            } else {
                // Fragment incomplete, will be appended to in next recv (offset increased above)
                break;
            }
        }
        if (ofs > 0) {
            // Move start of next unaccounted-for fragment to start of buffer
            std::memmove(&receiveBuffer[0], &receiveBuffer[ofs], receiveOffset - ofs);
            receiveOffset -= ofs; // and shift append position accordingly
        }
    }

    // Determine whether all buffers are filled now
    bool complete = true;
    for (int i=0; i<numReceptionBlocks; ++i) {
        if (blockReceiveOffsets[i] < blockReceiveSize[i]) {
            complete = false;
            break;
        }
    }
    finishedReception = complete;
}
// Parses the transfer header at the given offset in the receive buffer.
// Two wire formats exist: the legacy 6-byte preamble (header size +
// positive total transfer size, single block) and the new HeaderPreamble
// (total size marked -1, per-block sizes following). Returns the total
// number of header bytes consumed, or 0 if not enough data has been
// buffered yet.
int DataBlockProtocol::parseReceivedHeader(int length, int offset) {
    int headerExtraBytes = 6; // see below

    if(length < headerExtraBytes) {
        return 0;
    }

    unsigned short headerSize = ntohs(*reinterpret_cast<unsigned short*>(&receiveBuffer[offset]));
    if (length < (headerExtraBytes + headerSize)) {
        return 0;
    }

    totalReceiveSize = static_cast<int>(ntohl(*reinterpret_cast<unsigned int*>(&receiveBuffer[offset + 2])));

    if (totalReceiveSize >= 0) { // old-style single block transfer
        legacyTransfer = true;
        headerExtraBytes = 6;
        numReceptionBlocks = 1; // ONE interleaved buffer
        blockReceiveSize[0] = totalReceiveSize;
    } else { // marked -1 for new-style multi block transfer
        legacyTransfer = false;
        headerExtraBytes = static_cast<int>(sizeof(HeaderPreamble));
        HeaderPreamble* header = reinterpret_cast<HeaderPreamble*>(&receiveBuffer[offset]);
        numReceptionBlocks = 0;
        totalReceiveSize = 0;
        for (int i=0; i<MAX_DATA_BLOCKS; ++i) {
            int s = ntohl(header->netTransferSizes[i]);
            if (s > 0) {
                blockReceiveSize[i] = s;
                numReceptionBlocks++;
                totalReceiveSize += s;
            } else {
                // Non-positive sizes mark unused block slots; all slots
                // are scanned rather than stopping at the first one.
                //break;
            }
        }
    }

    if (numReceptionBlocks==0) throw std::runtime_error("Received a transfer with zero blocks");
    if (numReceptionBlocks > MAX_DATA_BLOCKS) throw std::runtime_error("Received a transfer with too many blocks");

    // Sanity-check the claimed sizes against the buffered data
    if(headerSize + headerExtraBytes > static_cast<int>(receiveBuffer.size())
            || totalReceiveSize < 0 || headerSize + headerExtraBytes > length ) {
        throw ProtocolException("Received invalid header!");
    }

    headerReceived = true;
    // Store the user-defined header portion for the consumer
    receivedHeader.assign(receiveBuffer.begin() + offset + headerExtraBytes,
        receiveBuffer.begin() + offset + headerSize + headerExtraBytes);
    resizeReceiveBuffer();

    return headerSize + headerExtraBytes;
}
// Discards all reception state so the next received message starts a
// fresh frame. When dropped is true the reset counts as a failed
// reception in the statistics.
void DataBlockProtocol::resetReception(bool dropped) {
    headerReceived = false;
    waitingForMissingSegments = false;
    finishedReception = false;
    numReceptionBlocks = 0;
    totalReceiveSize = 0;
    lostSegmentBytes = 0;
    missingReceiveSegments.clear();
    receivedHeader.clear();
    for(int block = 0; block < MAX_DATA_BLOCKS; block++) {
        blockReceiveOffsets[block] = 0;
        blockValidSize[block] = 0;
    }
    if(dropped) {
        droppedReceptions++;
    }
}
// Legacy accessor for the interleaved receive buffer.
// NOTE(review): length starts at 0 and min(0, offset) can never grow it,
// so this always reports 0 bytes. Looks like a leftover from the
// single-block implementation (where length was presumably initialized
// to the received size) after the multi-block refactor moved payload
// into blockReceiveBuffers — confirm against callers (which appear to
// use getBlockReceiveBuffer()/getBlockValidSize() instead).
unsigned char* DataBlockProtocol::getReceivedData(int& length) {
    length = 0;
    if(missingReceiveSegments.size() > 0) {
        length = min(length, missingReceiveSegments[0].offset);
    }
    return &receiveBuffer[0];
}
// Returns the user-defined header received for the current transfer, or
// nullptr if no header has arrived yet. length is always set: to the
// header size on success, or to 0 on the nullptr path (previously it was
// left unmodified there, so callers reading it saw a stale value).
unsigned char* DataBlockProtocol::getReceivedHeader(int& length) {
    if(receivedHeader.size() > 0) {
        length = static_cast<int>(receivedHeader.size());
        return &receivedHeader[0];
    } else {
        length = 0;
        return nullptr;
    }
}
// Dispatches a received control message (UDP only). The message type
// byte sits directly before the trailing 0xFFFFFFFF control marker.
// Returns false if the message is too short to be a control message.
bool DataBlockProtocol::processControlMessage(int length) {
    // Minimum size: type byte plus the 4-byte control marker
    if(length < static_cast<int>(sizeof(int) + 1)) {
        return false;
    }

    // Bytes preceding the type byte are the message payload
    int payloadLength = length - sizeof(int) - 1;
    switch(receiveBuffer[0 + payloadLength]) {
        case CONFIRM_MESSAGE:
            // Our connection request has been accepted
            connectionConfirmed = true;
            break;
        case CONNECTION_MESSAGE:
            // We establish a new connection
            connectionConfirmed = true;
            confirmationMessagePending = true;
            clientConnectionPending = true;

            // A connection request is just as good as a heartbeat
            lastReceivedHeartbeat = std::chrono::steady_clock::now();
            break;
        case HEADER_MESSAGE: {
                // A header arriving mid-frame means the previous frame
                // ended without (or before) its EOF message
                if (anyPayloadReceived()) {
                    if (allBlocksDone()) {
                        LOG_ERROR("No EOF message received!");
                    } else {
                        LOG_ERROR("Received header too late/early!");
                    }
                    resetReception(true);
                }
                if(parseReceivedHeader(payloadLength, 0) == 0) {
                    throw ProtocolException("Received header is too short!");
                }
            }
            break;
        case EOF_MESSAGE:
            // This is the end of the frame
            if(anyPayloadReceived()) {
                parseEofMessage(length);
            }
            break;
        case RESEND_MESSAGE: {
            // The client requested retransmission of missing packets
            parseResendMessage(payloadLength);
            break;
        }
        case HEARTBEAT_MESSAGE:
            // A cyclic heartbeat message
            lastReceivedHeartbeat = std::chrono::steady_clock::now();
            break;
        default:
            throw ProtocolException("Received invalid control message!");
            break;
    }

    return true;
}
// Reports whether a remote peer is considered connected. For TCP this
// is delegated to the transport and always true. For UDP a server
// additionally requires a heartbeat within the last two heartbeat
// intervals; a client only needs the confirmed handshake.
bool DataBlockProtocol::isConnected() const {
    if(protType == PROTOCOL_TCP) {
        // Connection is handled by TCP and not by us
        return true;
    }
    if(!connectionConfirmed) {
        return false;
    }
    if(!isServer) {
        return true;
    }
    const auto sinceHeartbeatMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - lastReceivedHeartbeat).count();
    return sinceHeartbeatMs < 2*HEARTBEAT_INTERVAL_MS;
}
// Produces the next pending control message (UDP only), or nullptr if
// none is due. The if/else-if chain is a fixed priority order:
// confirmation > (client) reconnect > transfer header > EOF > resend
// request > (client) heartbeat. All messages except the raw transfer
// header get the 4-byte 0xFFFFFFFF control marker appended.
const unsigned char* DataBlockProtocol::getNextControlMessage(int& length) {
    length = 0;

    if(protType == PROTOCOL_TCP) {
        // There are no control messages for TCP
        return nullptr;
    }

    if(confirmationMessagePending) {
        // Send confirmation message
        confirmationMessagePending = false;
        controlMessageBuffer[0] = CONFIRM_MESSAGE;
        length = 1;
    } else if(!isServer && std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - lastRemoteHostActivity).count() > RECONNECT_TIMEOUT_MS) {
        // Send a new connection request
        controlMessageBuffer[0] = CONNECTION_MESSAGE;
        length = 1;

        // Also update time stamps
        lastRemoteHostActivity = lastSentHeartbeat = std::chrono::steady_clock::now();
    } else if(transferHeaderData != nullptr && isConnected()) {
        // We need to send a new protocol header. The header buffer is
        // returned directly (it already carries its own framing).
        length = transferHeaderSize;
        const unsigned char* ret = transferHeaderData;
        transferHeaderData = nullptr;
        return ret;
    } else if(eofMessagePending) {
        // Send end of frame message: final raw offset of the last
        // transmitted block, followed by the EOF type byte
        eofMessagePending = false;
        unsigned int networkOffset = htonl(mergeRawOffset(lastTransmittedBlock, transferSize[lastTransmittedBlock]));
        memcpy(&controlMessageBuffer[0], &networkOffset, sizeof(int));
        controlMessageBuffer[sizeof(int)] = EOF_MESSAGE;
        length = 5;
    } else if(resendMessagePending) {
        // Send a re-send request for missing messages
        resendMessagePending = false;
        if(!generateResendRequest(length)) {
            length = 0;
            return nullptr;
        }
    } else if(!isServer && std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - lastSentHeartbeat).count() > HEARTBEAT_INTERVAL_MS) {
        // Send a heartbeat message
        controlMessageBuffer[0] = HEARTBEAT_MESSAGE;
        length = 1;
        lastSentHeartbeat = std::chrono::steady_clock::now();
    } else {
        return nullptr;
    }

    // Mark this message as a control message
    controlMessageBuffer[length++] = 0xff;
    controlMessageBuffer[length++] = 0xff;
    controlMessageBuffer[length++] = 0xff;
    controlMessageBuffer[length++] = 0xff;

    return controlMessageBuffer;
}
// Returns true exactly once after a new client connection request has
// been received; the pending flag is consumed by this call.
bool DataBlockProtocol::newClientConnected() {
    const bool wasPending = clientConnectionPending;
    clientConnectionPending = false;
    return wasPending;
}
bool DataBlockProtocol::generateResendRequest(int& length) {
length = static_cast<int>(missingReceiveSegments.size() * (sizeof(int) + sizeof(unsigned short)));
if(length + sizeof(int) + 1> sizeof(controlMessageBuffer)) {
return false;
}
length = 0;
for(MissingReceiveSegment segment: missingReceiveSegments) {
unsigned int segOffset = htonl(static_cast<unsigned int>(segment.offset));
unsigned int segLen = htonl(static_cast<unsigned int>(segment.length));
memcpy(&controlMessageBuffer[length], &segOffset, sizeof(segOffset));
length += sizeof(unsigned int);
memcpy(&controlMessageBuffer[length], &segLen, sizeof(segLen));
length += sizeof(unsigned int);
}
controlMessageBuffer[length++] = RESEND_MESSAGE;
return true;
}
// Parses a resend request (see generateResendRequest()) and queues the
// requested segments for retransmission. Requests outside the valid
// transmitted data range are ignored.
void DataBlockProtocol::parseResendMessage(int length) {
    missingTransferSegments.clear();

    // Each entry is two 32-bit values (raw offset and length), exactly as
    // written by generateResendRequest(). The previous entry size of
    // sizeof(unsigned int) + sizeof(unsigned short) (6 bytes) overstated
    // the entry count for a payload of n*8 bytes, reading past the
    // received data for 3 or more entries.
    const int bytesPerEntry = 2 * static_cast<int>(sizeof(unsigned int));
    int num = length / bytesPerEntry;
    int bufferOffset = 0;

    for(int i=0; i<num; i++) {
        unsigned int segOffsetNet = *reinterpret_cast<unsigned int*>(&receiveBuffer[bufferOffset]);
        bufferOffset += sizeof(unsigned int);
        unsigned int segLenNet = *reinterpret_cast<unsigned int*>(&receiveBuffer[bufferOffset]);
        bufferOffset += sizeof(unsigned int);

        int segmentOffsetRaw = static_cast<int>(ntohl(segOffsetNet)); // with block ID
        int segmentLength = static_cast<int>(ntohl(segLenNet));
        int dataBlockID, segmentOffset;
        splitRawOffset(segmentOffsetRaw, dataBlockID, segmentOffset);

        // Only honor requests that lie within the transmitted valid data
        if(segmentOffset >= 0 && segmentLength > 0 && (segmentOffset + segmentLength) <= rawValidBytes[dataBlockID]) {
            missingTransferSegments.push_back(std::pair<int, int>(
                segmentOffsetRaw, segmentLength));
        }
    }
}
// Handles an end-of-frame control message. Any block that is still
// short of its expected size becomes a missing segment; if there are
// any, a resend request is scheduled, otherwise the frame is complete.
// Also updates the running lost-segment statistics.
void DataBlockProtocol::parseEofMessage(int length) {
    completedReceptions++;
    // Running average of the lost-byte fraction over all completed frames
    lostSegmentRate = (lostSegmentRate * (completedReceptions-1) + ((double) lostSegmentBytes) / totalReceiveSize) / completedReceptions;
    if(length >= 4) {
        // Find all missing segments at the end of blocks
        for (int i=0; i<numReceptionBlocks; ++i) {
            if (blockReceiveOffsets[i] < blockReceiveSize[i]) {
                MissingReceiveSegment missingSeg;
                missingSeg.offset = blockReceiveOffsets[i];
                missingSeg.length = blockReceiveSize[i] - blockReceiveOffsets[i];
                missingSeg.isEof = true;
                missingReceiveSegments.push_back(missingSeg);
                lostSegmentBytes += missingSeg.length;
            }
        }
        if(missingReceiveSegments.size() > 0) {
            waitingForMissingSegments = true;
            resendMessagePending = true;
            // Initialize all missing block start indices with earliest missing address
            int mblock, moffset;
            for (int i=0; i<static_cast<int>(missingReceiveSegments.size()); ++i) {
                splitRawOffset(missingReceiveSegments[i].offset, mblock, moffset);
                if (moffset < blockReceiveOffsets[mblock]) {
                    blockReceiveOffsets[mblock] = moffset;
                }
            }
        } else {
            finishedReception = true;
        }
    } else {
        LOG_ERROR("EOF message too short, length " << length);
    }
}
// Grows (never shrinks) the raw receive buffer and the per-block
// reassembly buffers to the sizes required by the current transfer.
void DataBlockProtocol::resizeReceiveBuffer() {
    if(totalReceiveSize < 0) {
        throw ProtocolException("Received invalid transfer size!");
    }

    // We increase the requested size to allow for one additional network
    // message and the protocol overhead
    const int requiredSize = 2*getMaxReceptionSize()
        + MAX_OUTSTANDING_BYTES + sizeof(int);
    if(static_cast<int>(receiveBuffer.size()) < requiredSize) {
        receiveBuffer.resize(requiredSize);
    }

    // Each block buffer must hold its announced payload size
    for(int block = 0; block < numReceptionBlocks; block++) {
        if(static_cast<int>(blockReceiveBuffers[block].size()) < blockReceiveSize[block]) {
            blockReceiveBuffers[block].resize(blockReceiveSize[block]);
        }
    }
}
}} // namespace

View file

@ -0,0 +1,403 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_DATABLOCKPROTOCOL_H
#define VISIONTRANSFER_DATABLOCKPROTOCOL_H
#include <map>
#include <vector>
#include <memory>
#include <chrono>
#include <deque>
#include "visiontransfer/alignedallocator.h"
#include "visiontransfer/exceptions.h"
namespace visiontransfer {
namespace internal {
/**
* \brief A protocol for transmitting large blocks of data over a network.
*
* The protocol slices the large data block into several smaller chunks
* that can be transmitted over a network. A user defined header is
* always transmitted before this large data block.
*
* There are two different implementations for UDP and TCP. In UDP mode,
* packet loss is handled by performing a packet re-transmission. In TCP
* mode, data does not have to be received with the same packet size as
* it is sent out.
*
* This class is intended to be used by ImageProtocol and should normally
* not be used directly.
*/
class DataBlockProtocol {
public:
    // Transport selection; UDP adds its own reliability layer on top
    enum ProtocolType {
        PROTOCOL_TCP,
        PROTOCOL_UDP
    };

    // Maximum number of independent data blocks in a single transfer
    static const int MAX_DATA_BLOCKS = 8;

    // Constants that are also used in other places.
    static const int MAX_TCP_BYTES_TRANSFER = 0xFFFF; //64K - 1
    static const int MAX_UDP_RECEPTION = 0x4000; //16K
    static const int MAX_OUTSTANDING_BYTES = 2*MAX_TCP_BYTES_TRANSFER;

#pragma pack(push,1)
    // Extends previous one-channel 6-byte raw header buffer
    // Legacy transfers can be detected via non-zero netTransferSizeDummy
    struct HeaderPreamble {
        uint16_t netHeaderSize;
        int32_t netTransferSizeDummy; // layout compatibility, legacy detection
        uint32_t netTransferSizes[MAX_DATA_BLOCKS]; // per-block total size
    };
    // Per-segment header used for UDP payload messages
    struct SegmentHeaderUDP {
        uint32_t segmentOffset;
    };
    // Per-fragment header used for TCP stream data
    struct SegmentHeaderTCP {
        uint32_t fragmentSize;
        uint32_t segmentOffset;
    };
#pragma pack(pop)

    /**
     * \brief Creates a new instance
     *
     * \param server If set to true, this object will be a communication server.
     * \param protType The network transport protocol that is used.
     * \param maxUdpPacketSize Maximum allowed size of a UDP packet when sending data.
     */
    DataBlockProtocol(bool server, ProtocolType protType, int maxUdpPacketSize);

    /**
     * \brief Returns the size of the overhead data that is required for
     * transferring a single network message.
     */
    int getProtocolOverhead() const {
        // UDP data messages carry a trailing 32-bit raw segment offset
        return protType == PROTOCOL_UDP ? sizeof(int) : 0;
    }

    /**
     * \brief Returns the maximum payload size that can be received
     */
    int getMaxReceptionSize() const;

    /**
     * \brief Resets all transfer related internal variables
     */
    void resetTransfer();

    /**
     * \brief Sets a user-defined header that shall be transmitted with
     * the next transfer
     *
     * \param data Pointer to the data of the header that should be
     *        transferred.
     * \param headerSize Size of the data in \c data.
     * \param transferSize Total size of the payload for the next transfer.
     *
     * This method must be called before setTransferData(). A call before
     * the start of each transfer is necessary. There must be at least
     * 6 additional bytes of reserved memory after the end and before the
     * beginning of \c data.
     */
    void setTransferHeader(unsigned char* data, int headerSize, int blocks);

    /**
     * \brief Sets the per-block transfer size
     *
     * \param bytes Size of the data pointed to with the matching setTransferData()
     *
     * Replaces the old single-buffer total size that was prepended to the
     * second-level header alongside the header size.
     */
    void setTransferBytes(int block, long bytes);

    /**
     * \brief Sets the payload data for the next transfer.
     *
     * \param data Pointer to the data that should be transferred.
     * \param validBytes The number of bytes that are currently
     *        valid in \c data.
     *
     * Part of \c data will be overwritten. There must be at least 4 additional
     * allocated bytes at the end of \c data.
     *
     * If \c validBytes is set to a value smaller than the total transfer
     * size, only a partial transfer is performed. Subsequent calls to
     * setTransferValidBytes() are then necessary.
     */
    void setTransferData(int block, unsigned char* data, int validBytes = 0x7FFFFFFF);

    /**
     * \brief Updates the number of valid bytes in a partial transfer.
     *
     * \param validBytes The number of already valid bytes in the previously
     *        set data pointer.
     *
     * This method has to be called whenever new data is available in a
     * partial transfer. \see setTransferData()
     */
    void setTransferValidBytes(int block, int validBytes);

    /**
     * \brief Gets the next network message for the current transfer.
     *
     * \param length The length of the network message.
     * \return Pointer to the network message data.
     *
     * If the transfer has already been completed or if there are currently
     * no more valid bytes to be transmitted, a null pointer is returned.
     */
    const unsigned char* getTransferMessage(int& length);

    /**
     * \brief Returns true if the current transfer has been completed.
     */
    bool transferComplete();

    /**
     * \brief Gets a buffer for receiving the next network message.
     *
     * \param maxLength The expected maximum length that is required for
     *        receiving a network message.
     *
     * The returned buffer is a subsection of the internal receive buffer.
     */
    unsigned char* getNextReceiveBuffer(int maxLength);

    /**
     * \brief Resets the message reception.
     *
     * \param dropped If true, then this reset is rated as an error and
     *        internal counter for dropped transfers is increased.
     */
    void resetReception(bool dropped);

    /**
     * \brief Handles a received network message.
     *
     * \param length Length of the received network message.
     * \param transferComplete Set to true if a new transfer is complete after
     *        receiving the current packet
     *
     * Please see ImageProtocol::processReceivedMessage() for further details.
     */
    void processReceivedMessage(int length, bool& transferComplete);

    /**
     * \brief Returns the data that has been received for the current transfer.
     *
     * \param length Will be set to the number of bytes that have been received.
     * \return Pointer to the buffer containing the received data.
     *
     * The received data is valid until receiving the first network
     * message for a new transfer.
     */
    unsigned char* getReceivedData(int& length);

    /**
     * \brief Returns the header data that has been received for the
     * current transfer.
     *
     * \param length Will be set to the length of the header data in
     *        bytes.
     * \return Pointer to the buffer containing the received header data.
     *
     * The received header data is valid until receiving the first network
     * message for a new transfer.
     */
    unsigned char* getReceivedHeader(int& length);

    /**
     * \brief Returns the internal counter of dropped transfers during
     * reception.
     */
    int getDroppedReceptions() const {
        return droppedReceptions;
    }

    /**
     * \brief Returns true if the last network message has established a
     * new connection from a client
     *
     * For TCP this method always returns false as connections are
     * handled by the transport protocol.
     */
    bool newClientConnected();

    /**
     * \brief Returns true if a remote connection is established.
     *
     * For TCP this method always returns true as connections are
     * handled by the transport protocol.
     */
    bool isConnected() const;

    /**
     * \brief If a control message is pending to be transmitted, then
     * the message data will be returned by this method.
     *
     * \param length Will be set to the length of the message.
     * \return Pointer to the message data or NULL if no message is pending.
     *
     * Control messages are only used if the UDP transfer protocol is
     * selected. For TCP this method always returns a null pointer.
     */
    const unsigned char* getNextControlMessage(int& length);

    // Access to the reassembly buffer of one data block; only valid for
    // blocks announced in the current transfer header
    unsigned char* getBlockReceiveBuffer(int block) {
        if (block >= numReceptionBlocks) {
            throw ProtocolException("Tried to get receive buffer beyond initialized block range");
        }
        return &blockReceiveBuffers[block][0];
    }
    // Number of contiguous valid bytes at the start of a block buffer
    int getBlockValidSize(int block) {
        if (block >= numReceptionBlocks) {
            throw ProtocolException("Tried to get valid buffer index beyond initialized block range");
        }
        return blockValidSize[block];
    }
    // True once a block has been received completely
    bool isBlockDone(int block) {
        if (block >= numReceptionBlocks) {
            throw ProtocolException("Tried to get completion status of uninitialized block");
        }
        return blockValidSize[block] >= blockReceiveSize[block];
    }
    // True once every announced block has been received completely
    bool allBlocksDone() {
        for (int i=0; i<numReceptionBlocks; ++i) {
            if (!isBlockDone(i)) return false;
        }
        return true;
    }
    // True if any payload byte of the current frame has arrived yet
    bool anyPayloadReceived() {
        for (int i=0; i<numReceptionBlocks; ++i) {
            if (blockReceiveOffsets[i] > 0) return true;
        }
        return false;
    }

    std::string statusReport();

    // True once the transfer header of the current frame has been parsed
    bool wasHeaderReceived() const {
        return headerReceived;
    }

private:
    // The pimpl idiom is not necessary here, as this class is usually not
    // used directly

    // One contiguous gap in the received data, scheduled for resend
    struct MissingReceiveSegment {
        int offset;       // raw offset (block ID + byte offset)
        int length;       // gap size in bytes
        bool isEof;       // gap was detected by the EOF message
        unsigned char subsequentData[4];
    };

    static constexpr int HEARTBEAT_INTERVAL_MS = 1000;
    static constexpr int RECONNECT_TIMEOUT_MS = 1000;

    // Control message type bytes (UDP only)
    static constexpr unsigned char CONNECTION_MESSAGE = 0x01;
    static constexpr unsigned char CONFIRM_MESSAGE = 0x02;
    static constexpr unsigned char HEADER_MESSAGE = 0x03;
    static constexpr unsigned char RESEND_MESSAGE = 0x04;
    static constexpr unsigned char EOF_MESSAGE = 0x05;
    static constexpr unsigned char HEARTBEAT_MESSAGE = 0x06;

    bool isServer;
    ProtocolType protType;
    int maxPayloadSize;
    int minPayloadSize;

    // Transfer related variables
    bool transferDone;
    unsigned char* rawDataArr[MAX_DATA_BLOCKS];   // caller-owned payload pointers
    int rawDataArrStrideHackOrig[MAX_DATA_BLOCKS];
    int rawDataArrStrideHackRepl[MAX_DATA_BLOCKS];
    int rawValidBytes[MAX_DATA_BLOCKS];           // valid bytes per block
    int transferOffset[MAX_DATA_BLOCKS];          // next byte to transmit
    int transferSize[MAX_DATA_BLOCKS];            // total bytes per block
    char overwrittenTransferData[sizeof(SegmentHeaderTCP)]; // saved bytes under in-place header
    int overwrittenTransferIndex;
    int overwrittenTransferBlock;
    unsigned char* transferHeaderData;
    int transferHeaderSize;
    int totalBytesCompleted;
    int totalTransferSize;
    int numTransferBlocks;
    int lastTransmittedBlock;

    // Reliability related variables
    std::deque<MissingReceiveSegment> missingReceiveSegments;
    std::deque<std::pair<int, int> > missingTransferSegments; // (raw offset, length)
    bool waitingForMissingSegments;
    int totalReceiveSize;

    unsigned char controlMessageBuffer[1024];

    // Connection related variables
    bool connectionConfirmed;
    bool confirmationMessagePending;
    bool eofMessagePending;
    bool clientConnectionPending;
    bool resendMessagePending;
    std::chrono::steady_clock::time_point lastRemoteHostActivity;
    std::chrono::steady_clock::time_point lastSentHeartbeat;
    std::chrono::steady_clock::time_point lastReceivedHeartbeat;

    // Reception related variables
    std::vector<unsigned char, AlignedAllocator<unsigned char> > receiveBuffer;
    std::vector<unsigned char, AlignedAllocator<unsigned char> > blockReceiveBuffers[MAX_DATA_BLOCKS];
    int blockReceiveOffsets[MAX_DATA_BLOCKS]; // next expected byte per block
    int blockReceiveSize[MAX_DATA_BLOCKS];    // announced size per block
    int blockValidSize[MAX_DATA_BLOCKS];      // contiguous valid prefix per block
    std::vector<unsigned char> receivedHeader;
    bool finishedReception;
    int droppedReceptions;
    int completedReceptions;
    double lostSegmentRate;
    int lostSegmentBytes;
    unsigned char unprocessedMsgPart[MAX_OUTSTANDING_BYTES];
    int unprocessedMsgLength;
    bool headerReceived;
    bool legacyTransfer;   // old-style single-block transfer detected
    int numReceptionBlocks;
    int receiveOffset;     // append position in receiveBuffer

    const unsigned char* extractPayload(const unsigned char* data, int& length, bool& error);
    bool processControlMessage(int length);
    void restoreTransferBuffer();
    bool generateResendRequest(int& length);
    void getNextTransferSegment(int& block, int& offset, int& length);
    void parseResendMessage(int length);
    void parseEofMessage(int length);
    void integrateMissingUdpSegments(int block, int lastSegmentOffset, int lastSegmentSize);
    void processReceivedUdpMessage(int length, bool& transferComplete);
    void processReceivedTcpMessage(int length, bool& transferComplete);
    void resizeReceiveBuffer();
    int parseReceivedHeader(int length, int offset);
    void zeroStructures();
    // Pack/unpack of the raw offset: high bits carry the block ID,
    // low bits the byte offset within the block
    void splitRawOffset(int rawSegmentOffset, int& dataBlockID, int& segmentOffset);
    int mergeRawOffset(int dataBlockID, int segmentOffset, int reserved=0);
};
}} // namespace
#endif

View file

@ -0,0 +1,130 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_DATACHANNEL_CONTROL_H
#define VISIONTRANSFER_DATACHANNEL_CONTROL_H
#include <cstring>
#include <memory>
#include <map>
#include <set>
#include <vector>
#include <visiontransfer/datachannelservicebase.h>
namespace visiontransfer {
namespace internal {
/**
* \brief Commands understood on DataChannelService channel 0 (control)
*/
/**
 * \brief Commands understood on DataChannelService channel 0 (control)
 */
class DataChannelControlCommands {
public:
    // Wire values are transmitted as a 16-bit big-endian integer
    enum Command {
        CTLReserved,                 // 0: unused / invalid
        CTLRequestAdvertisement,     // ask the peer to list its channels
        CTLProvideAdvertisement,     // reply carrying the channel list
        CTLRequestSubscriptions,     // request channel subscriptions
        CTLProvideSubscriptions,     // confirm channel subscriptions
        CTLRequestUnsubscriptions,   // request channel unsubscriptions
        CTLProvideUnsubscriptions    // confirm channel unsubscriptions
    };
};
/**
* \brief Internal helpers for packing and unpacking channel 0 service messages
*/
class DataChannelControlUtil {
public:
static DataChannelControlCommands::Command getCommand(unsigned char* data, int datalen) {
if (datalen < 2) throw std::runtime_error("Buffer too small");
return (DataChannelControlCommands::Command) ntohs(*((uint16_t*) data));
}
// Advertisements (available services)
    // Serializes the channel map into an advertisement message:
    // 16-bit big-endian command, one-byte channel count (at most 255),
    // then per channel an ID byte, a type byte, an info string length
    // byte and the info string bytes. Returns the number of bytes
    // written; throws std::runtime_error if the buffer is too small.
    static int packAdvertisementMessage(unsigned char* data, int datalen, DataChannelControlCommands::Command cmd, const std::map<DataChannel::ID, std::shared_ptr<DataChannel> >& channels) {
        int origDataLen = datalen;
        if (datalen < 3) throw std::runtime_error("Buffer too small");
        *((uint16_t*)data) = htons(cmd);
        uint8_t num = (uint8_t) std::min(255, (int) channels.size()); // pack 255 items max
        data[2] = num;
        // payload
        data += 3; datalen -= 3;
        int i = 0;
        for (auto kv: channels) {
            i++; if (i>num) break;
            if (datalen < 3) throw std::runtime_error("Buffer too small");
            auto p = kv.second;
            const std::string& infoString = p->getInfoString();
            // Info strings are truncated to 255 bytes (one-byte length)
            uint8_t strSize = (uint8_t) std::min(255, (int) infoString.size());
            int elemLen = 1 + 1 + 1 + strSize;
            if (datalen < elemLen) throw std::runtime_error("Buffer too small");
            data[0] = p->getChannelID();
            data[1] = p->getChannelType();
            data[2] = strSize;
            std::memcpy(data + 3, infoString.c_str(), strSize);
            data += elemLen; datalen -= elemLen;
        }
        // Bytes consumed = original length minus what is still free
        return (origDataLen - datalen);
    }
static std::vector<DataChannelInfo> unpackAdvertisementMessage(unsigned char* data, int datalen) {
std::vector<DataChannelInfo> result;
if (datalen < 3) throw std::runtime_error("Buffer too small");
uint8_t num = data[2];
data += 3; datalen -= 3;
for (int i=0; i<num; ++i) {
if (datalen < 3) throw std::runtime_error("Buffer too small");
uint8_t id = data[0];
uint8_t type = data[1];
uint8_t strSize = data[2];
int elemLen = 1 + 1 + 1 + strSize;
if (datalen < elemLen) throw std::runtime_error("Buffer too small");
result.emplace_back(DataChannelInfo((DataChannel::ID) id, (DataChannel::Type) type, std::string(data[3], strSize)));
data += elemLen; datalen -= elemLen;
}
return result;
}
// Subscriptions (connected services)
static int packSubscriptionMessage(unsigned char* data, int datalen, DataChannelControlCommands::Command cmd, const std::vector<DataChannel::ID>& subscriptions) {
if (datalen < 4) throw std::runtime_error("Buffer too small");
*((uint16_t*)data) = htons(cmd);
uint8_t num = (uint8_t) std::min(255, (int) subscriptions.size());
data[2] = num; // pack 255 items max
data += 3; datalen -= 3;
if (datalen < (1*num)) throw std::runtime_error("Buffer too small");
for (int i=0; i<num; ++i) {
auto p = subscriptions[i];
data[0] = p;
data += 1; datalen -= 1;
}
return (2+1+1*num);
}
static std::vector<DataChannel::ID> unpackSubscriptionMessage(unsigned char* data, int datalen) {
std::vector<DataChannel::ID> result;
if (datalen < 3) throw std::runtime_error("Buffer too small");
uint8_t num = data[2];
data += 3; datalen -= 3;
if (datalen < (1*num)) throw std::runtime_error("Buffer too small");
for (int i=0; i<num; ++i) {
result.emplace_back(static_cast<DataChannel::ID>(data[0]));
data += 1; datalen -= 1;
}
return result;
}
};
}} // namespaces
#endif

View file

@ -0,0 +1,157 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <visiontransfer/datachannel-imu-bno080.h>
#include <visiontransfer/protocol-sh2-imu-bno080.h>
namespace visiontransfer {
namespace internal {
ClientSideDataChannelIMUBNO080::ClientSideDataChannelIMUBNO080()
    : DataChannel() {
    infoString = "Receiver for the BNO080 IMU sensor";
    // Sane defaults for orientation etc. if values are queried despite lack of sensor
    lastXYZ[0x01 - 1] = {0, 0, 0, 0, 0, 10};
    lastXYZ[0x02 - 1] = {0, 0, 0, 0, 0, 0};
    lastXYZ[0x03 - 1] = {0, 0, 0, 0, 0, 0};
    lastXYZ[0x04 - 1] = {0, 0, 0, 0, 0, 0};
    lastXYZ[0x05 - 1] = {0, 0, 0, 0, 0, 0}; // unused, cf. the quaternion below
    lastXYZ[0x06 - 1] = {0, 0, 0, 0, 0, 10};
    lastScalar[0x0a - 0x0a] = {0, 0, 0, 0};
    lastScalar[0x0b - 0x0a] = {0, 0, 0, 0}; // unused / sensor not present
    // FIXED: the original initialized index (0x0d - 0x0a) twice and left the
    // humidity slot (0x0c - 0x0a) uninitialized.
    lastScalar[0x0c - 0x0a] = {0, 0, 0, 0};
    lastScalar[0x0d - 0x0a] = {0, 0, 0, 0}; // unused / sensor not present
    lastScalar[0x0e - 0x0a] = {0, 0, 0, 0};
    lastRotationQuaternion = {0, 0, 0, 0.0, 0.0, 0.0, 1.0, 0}; // channel 0x05
}
// Parses one SH2 sensor input report, converts its fixed-point payload and
// stores the decoded sample in the matching last-value cache and ring buffer.
// baseTime is the epoch-microsecond base for this report batch; each record
// carries an additional per-report delay. Returns the record length in bytes
// so the caller can advance, or a value < 1 for unknown sensor IDs.
int ClientSideDataChannelIMUBNO080::handleSensorInputRecord(unsigned char* data, int datalen, uint64_t baseTime) {
    int sensorid = data[0];
    int status = data[2] & 3;                       // status / accuracy bits
    int delay = ((data[2] & 0xfc) << 6) | data[3];  // report delay relative to baseTime
    uint64_t myTime = baseTime + delay;
    switch (sensorid) {
        // these have identical format, 3D vector
        case SH2Constants::SENSOR_ACCELEROMETER: //0x01
        case SH2Constants::SENSOR_GYROSCOPE: //0x02
        case SH2Constants::SENSOR_MAGNETOMETER: //0x03
        case SH2Constants::SENSOR_LINEAR_ACCELERATION: //0x04
        case SH2Constants::SENSOR_GRAVITY: //0x06
            {
                double x, y, z;
                auto q = sh2GetSensorQPoint(sensorid);
                x = sh2ConvertFixedQ16(sh2GetU16(data+4), q);
                y = sh2ConvertFixedQ16(sh2GetU16(data+6), q);
                z = sh2ConvertFixedQ16(sh2GetU16(data+8), q);
                // sensorid-1 is in range [0..5]; axes are stored remapped as (x, z, -y)
                lastXYZ[sensorid-1] = TimestampedVector((int) (myTime/1000000), (int) (myTime%1000000), status, x, z, -y);
                ringbufXYZ[sensorid-1].pushData(lastXYZ[sensorid-1]);
                break;
            }
        // this one is 4D (quaternion data), plus accuracy field
        case SH2Constants::SENSOR_ROTATION_VECTOR: //0x05
        case SH2Constants::SENSOR_GAME_ROTATION_VECTOR://0x08
        case SH2Constants::SENSOR_GEOMAGNETIC_ROTATION://0x09
            {
                double x, y, z, w;
                double accuracy = -1.0;
                auto q = sh2GetSensorQPoint(sensorid);
                x = sh2ConvertFixedQ16(sh2GetU16(data+4), q);
                y = sh2ConvertFixedQ16(sh2GetU16(data+6), q);
                z = sh2ConvertFixedQ16(sh2GetU16(data+8), q);
                w = sh2ConvertFixedQ16(sh2GetU16(data+10), q);
                if (sensorid!=SH2Constants::SENSOR_GAME_ROTATION_VECTOR) {
                    // The BNO080 'game rotation vectors' do not provide an accuracy estimate
                    // (since they do not estimate yaw in a fixed geomagnetic system).
                    accuracy = (double) ((signed short) sh2GetU16(data+12)) / (double) (1 << 12); // accuracy Q point is 12
                }
                lastRotationQuaternion = TimestampedQuaternion((int) (myTime/1000000), (int) (myTime%1000000), status, x, z, -y, w, accuracy);
                ringbufRotationQuaternion.pushData(lastRotationQuaternion);
                break;
            }
        // the misc. sensors are 1D floats (32b or 16b)
        case SH2Constants::SENSOR_PRESSURE: // 0x0a
        case SH2Constants::SENSOR_AMBIENT_LIGHT: // 0x0b
            {
                // FIXED: these reports carry a 32-bit fixed-point value
                // (read via sh2GetU32); the original stored it into a
                // signed short, truncating anything beyond 16 bits.
                signed int svalue = (signed int) sh2GetU32(data+4);
                double value = (double) svalue / (double)(1 << sh2GetSensorQPoint(sensorid));
                lastScalar[sensorid - 0x0a] = TimestampedScalar((int) (myTime/1000000), (int) (myTime%1000000), status, value);
                ringbufScalar[sensorid - 0x0a].pushData(lastScalar[sensorid - 0x0a]);
                break;
            }
        case SH2Constants::SENSOR_HUMIDITY: // 0x0c
        case SH2Constants::SENSOR_PROXIMITY: // 0x0d
        case SH2Constants::SENSOR_TEMPERATURE: // 0x0e
            {
                // 16-bit fixed-point payload
                signed short svalue = sh2GetU16(data+4);
                double value = (double) svalue / (double)(1 << sh2GetSensorQPoint(sensorid));
                lastScalar[sensorid - 0x0a] = TimestampedScalar((int) (myTime/1000000), (int) (myTime%1000000), status, value);
                ringbufScalar[sensorid - 0x0a].pushData(lastScalar[sensorid - 0x0a]);
                break;
            }
        default:
            break;
    }
    // Unknown sensor IDs yield an invalid (<1) length; the caller stops parsing.
    int recordlen = sh2GetSensorReportLength(sensorid);
    return recordlen;
}
// Dispatches one length-delimited SH2 cargo chunk by its report type:
// 0xff carries our interrupt-synchronized base timestamp, 0xfb carries an
// SH-2 time base followed by concatenated sensor input records.
void ClientSideDataChannelIMUBNO080::handleChunk(unsigned char* data, int datalen) {
    if (datalen < 5) return;
    auto cargobase = reinterpret_cast<SH2CargoBase*>(data);
    // NOTE(review): 'static' means the last interrupt timestamp is shared by
    // all instances and is not thread-safe — fine for one receiver thread,
    // confirm before adding more.
    static uint64_t interruptTime = 0; // will always be reported first, below
    switch (cargobase->getReportType()) {
        case 0xff: { // Our own interrupt-synchronized timestamp
            auto report = reinterpret_cast<SH2CargoBodyScenescanTimestamp*>(data);
            interruptTime = report->getUSecSinceEpoch();
            break;
        }
        case 0xfb: { // SH-2 Time Base (followed by sensor reports)
            auto report = reinterpret_cast<SH2CargoBodyTimeBase*>(data);
            long basetimeOfs = report->getTimeBase();
            // Absolute base time for this batch: interrupt time minus the offset.
            uint64_t localBase = interruptTime - basetimeOfs;
            data += sizeof(SH2CargoBodyTimeBase); datalen -= sizeof(SH2CargoBodyTimeBase);
            // The (variable-length) remainder of this packet are concatenated SH2 sensor input reports.
            // They must be parsed in order since they are of differing sizes, depending on the sensor type.
            int recordlen;
            while (datalen > 0) {
                recordlen = handleSensorInputRecord(data, datalen, localBase);
                if (recordlen<1) break; // record type unknown -> size unknown -> cannot proceed
                data += recordlen; datalen -= recordlen;
            }
            break;
        }
        case 0xfa: // SH-2 Timestamp Rebase
            // Required for BNO batch reports that span >1.6s.
            // This is not relevant here, since we set the batch delay to intervals
            // considerably shorter than that (the server stores those batches
            // immediately with integrated base timestamps).
        default: {
        }
    }
}
// Splits an incoming datagram payload into its length-prefixed chunks and
// forwards each to handleChunk(). The top bit of the 16-bit length word is
// masked out (flag bit in the SH2 transport framing). Always returns 1.
int ClientSideDataChannelIMUBNO080::handleMessage(DataChannelMessage& message, sockaddr_in* sender) {
    unsigned char* data = message.payload;
    int datalen = message.header.payloadSize;
    // FIXED: require at least the 2-byte length word (the original read past
    // the buffer for a 1-byte remainder) and bail out on a zero-length chunk,
    // which previously spun forever since the cursor never advanced.
    while (datalen >= 2) {
        int elemlen = sh2GetU16(data) & 0x7fff;
        if (elemlen < 1) break; // malformed chunk; cannot make progress
        handleChunk(data, elemlen);
        data += elemlen; datalen -= elemlen;
    }
    return 1;
}
}} // namespaces

View file

@ -0,0 +1,89 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
// DataChannel protocol definition for the BNO080 IMU sensor
#ifndef VISIONTRANSFER_DATACHANNEL_IMU_BNO080_H
#define VISIONTRANSFER_DATACHANNEL_IMU_BNO080_H
#include <visiontransfer/datachannelservicebase.h>
#include <visiontransfer/sensorringbuffer.h>
#include <vector>
namespace visiontransfer {
namespace internal {
// TODO IMPLEMENT_ME / Not active at the moment:
/**
* \brief Commands to control the IMU/environmental sensor and its data reporting
*/
class DataChannelIMUBNO080Commands {
public:
    // Command codes for controlling the BNO080 data channel. Per the TODO
    // above, this control path is not active yet; names indicate the intended
    // semantics but are not implemented anywhere in this file.
    enum Command {
        BNOReserved,  // 0: reserved / invalid
        BNOReset,
        BNOEnableSensors,
        BNORateLimit,
        BNOReports
    };
};
/**
* \brief Helper functions for IMU control messages
*/
class DataChannelIMUBNO080Util {
// FIXED: these static helpers were class-private (default access for
// 'class'), making the utility unusable from outside. The sibling
// DataChannelControlUtil declares its helpers public.
public:
    /// Extracts the leading 16-bit network-byte-order command code.
    static DataChannelIMUBNO080Commands::Command getCommand(unsigned char* data, int datalen) {
        if (datalen < 2) throw std::runtime_error("Buffer too small");
        return (DataChannelIMUBNO080Commands::Command) ntohs(*((uint16_t*) data));
    }
    /// Packs a BNOReset command; returns the number of bytes written (2).
    static int packResetMessage(unsigned char* data, int datalen) {
        if (datalen < 2) throw std::runtime_error("Buffer too small");
        *((uint16_t*)data) = htons(DataChannelIMUBNO080Commands::BNOReset);
        return 2;
    }
};
/**
* \brief Encapsulated receiver with ring buffers for IMU / environment sensor data.
*
* Public access transparently via DataChannelService
*/
class ClientSideDataChannelIMUBNO080: public DataChannel {
private:
    // Capacity of each per-sensor ring buffer, in samples.
    static constexpr int RINGBUFFER_SIZE = 2048;
public:
    // These are inspected and consumed by the DataChannelService
    SensorDataRingBuffer<TimestampedVector, RINGBUFFER_SIZE> ringbufXYZ[6]; // for sensors 0x01 .. 0x06 (w/o 5)
    TimestampedVector lastXYZ[6]; // cache the most recent value for each channel
    SensorDataRingBuffer<TimestampedQuaternion, RINGBUFFER_SIZE> ringbufRotationQuaternion; // for 0x05, Rot Vec
    TimestampedQuaternion lastRotationQuaternion;
    SensorDataRingBuffer<TimestampedScalar, RINGBUFFER_SIZE> ringbufScalar[5]; // 0x0a .. 0x0e (temp, pressure..)
    TimestampedScalar lastScalar[5];
public:
    // Seeds the caches with neutral defaults (see the .cpp constructor).
    ClientSideDataChannelIMUBNO080();
    DataChannel::Type getChannelType() const override { return DataChannel::Types::BNO080; }
    // Parses a single SH2 sensor input record; returns its length in bytes.
    int handleSensorInputRecord(unsigned char* data, int datalen, uint64_t baseTime);
    // Dispatches one length-delimited cargo chunk.
    void handleChunk(unsigned char* data, int datalen);
    // Splits a datagram payload into chunks; always returns 1.
    int handleMessage(DataChannelMessage& message, sockaddr_in* sender) override;
    // No device-side setup required for this receive-only channel.
    bool initialize() override { return true; }
    int startService() override { return 1; }
    int stopService() override { return 1; }
};
}} // namespaces
#endif

View file

@ -0,0 +1,242 @@
#include <sys/types.h>
#include <cstring>
#include <stdexcept>
#include <fcntl.h>
#include <fstream>
#include <visiontransfer/internalinformation.h>
#include <visiontransfer/networking.h>
#include <visiontransfer/datachannelservicebase.h>
#include <visiontransfer/datachannel-control.h>
#include <visiontransfer/datachannelservice.h>
#include <visiontransfer/datachannel-imu-bno080.h>
#include <visiontransfer/protocol-sh2-imu-bno080.h> // for sensor constants
#include <iostream>
#include <memory>
#include <functional>
#include <thread>
#include <mutex>
#include <chrono>
#ifdef _WIN32
#include <ws2tcpip.h>
#endif
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
// Client-side implementation of the data channel service: owns the receiver
// channels, the background poll thread, and the control-channel handshake.
class DataChannelServiceImpl: public DataChannelServiceBase {
private:
    sockaddr_in serverAddr;  // device address used for channel 0 control traffic
    //
    std::shared_ptr<std::thread> receiverThread;  // detached background poll thread
    unsigned long pollDelay;  // sleep between poll iterations, in microseconds
    //
    std::shared_ptr<ClientSideDataChannelIMUBNO080> channelBNO080;  // IMU receiver
    //
    int handleChannel0Message(DataChannelMessage& message, sockaddr_in* sender) override;
    void initiateHandshake();
    void subscribeAll();
    void unsubscribeAll();
    void receiverRoutine();
public:
    // NOTE(review): written by the destructor's thread and read by the
    // detached receiver thread without synchronization — consider
    // std::atomic<bool>; confirm intended shutdown semantics.
    bool threadRunning;
    std::vector<DataChannelInfo> channelsAvailable;  // as advertised by the device
    std::map<DataChannel::Type, std::set<DataChannel::ID>> channelsAvailableByType;
public:
    DataChannelServiceImpl(DeviceInfo deviceInfo);
    DataChannelServiceImpl(const char* ipAddr);
    virtual ~DataChannelServiceImpl() { }
    void launch(unsigned long pollDelayUSec);
public:
    // High-level data channels API
    // Most recent rotation quaternion received on channel 0x05.
    TimestampedQuaternion getLastRotationQuaternion() {
        return channelBNO080->lastRotationQuaternion;
    }
    std::vector<TimestampedQuaternion> getRotationQuaternionSeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
        return channelBNO080->ringbufRotationQuaternion.popBetweenTimes(fromSec, fromUSec, untilSec, untilUSec);
    }
    // idx is the SH2 sensor ID (0x01..0x06); the arrays are 0-based.
    TimestampedVector getLastSensorVector(int idx) {
        return channelBNO080->lastXYZ[idx - 1];
    }
    std::vector<TimestampedVector> getSensorVectorSeries(int idx, int fromSec, int fromUSec, int untilSec, int untilUSec) {
        return channelBNO080->ringbufXYZ[idx - 1].popBetweenTimes(fromSec, fromUSec, untilSec, untilUSec);
    }
    // idx is the SH2 sensor ID (0x0a..0x0e).
    TimestampedScalar getLastSensorScalar(int idx) {
        return channelBNO080->lastScalar[idx - 0x0a];
    }
    std::vector<TimestampedScalar> getSensorScalarSeries(int idx, int fromSec, int fromUSec, int untilSec, int untilUSec) {
        return channelBNO080->ringbufScalar[idx - 0x0a].popBetweenTimes(fromSec, fromUSec, untilSec, untilUSec);
    }
};
} // internal namespace
// Pimpl wrapper: holds the shared implementation object so the public header
// does not expose any implementation types.
class DataChannelService::Pimpl {
public:
    std::shared_ptr<internal::DataChannelServiceImpl> impl;
    Pimpl(DeviceInfo deviceInfo) {
        impl = std::make_shared<internal::DataChannelServiceImpl>(deviceInfo);
    }
    Pimpl(const char* ipAddress) {
        impl = std::make_shared<internal::DataChannelServiceImpl>(ipAddress);
    }
};
// Background loop run by the detached receiver thread: poll the socket via
// process(), then sleep pollDelay microseconds, until threadRunning is
// cleared (by the destructor).
// NOTE(review): threadRunning is a plain bool shared across threads without
// synchronization — consider std::atomic<bool>.
void internal::DataChannelServiceImpl::receiverRoutine() {
    threadRunning = true;
    while (threadRunning) {
        process();
        std::this_thread::sleep_for(std::chrono::microseconds(pollDelay));
    }
}
// Registers the receiver channels, starts the background poll thread, and
// kicks off the channel-advertisement handshake with the device.
void internal::DataChannelServiceImpl::launch(unsigned long pollDelayUSec) {
    // Prepare our receivers (all supported channels aside from service channel 0)
    channelBNO080 = std::make_shared<ClientSideDataChannelIMUBNO080>();
    registerChannel(channelBNO080);
    // Prepare our poll thread
    pollDelay = pollDelayUSec;
    receiverThread = std::make_shared<std::thread>(std::bind(&internal::DataChannelServiceImpl::receiverRoutine, this));
    // NOTE(review): the thread is detached and captures 'this'; it may still
    // be running inside process() when this object is destroyed (the
    // destructor only clears threadRunning). Confirm shutdown ordering.
    receiverThread->detach();
    // Say hello to the device to get a channel advertisement
    initiateHandshake();
}
// Asks the device (on control channel 0) to advertise its available data
// channels; the reply is processed in handleChannel0Message().
void internal::DataChannelServiceImpl::initiateHandshake() {
    uint16_t cmd = htons((uint16_t) DataChannelControlCommands::CTLRequestAdvertisement);
    sendDataIsolatedPacket((DataChannel::ID) 0x00, DataChannel::Types::CONTROL, (unsigned char*) &cmd, sizeof(cmd), &serverAddr);
}
// Sends a subscription request on control channel 0.
// NOTE(review): the ID list {0} appears to act as a wildcard for "all
// channels" — confirm against the device-side handler.
void internal::DataChannelServiceImpl::subscribeAll() {
    unsigned char data[1024];
    int len = DataChannelControlUtil::packSubscriptionMessage(data, 1024, DataChannelControlCommands::CTLRequestSubscriptions, {0});
    sendDataIsolatedPacket((DataChannel::ID) 0x00, DataChannel::Types::CONTROL, data, len, &serverAddr);
}
// Sends an unsubscription request on control channel 0 (counterpart of
// subscribeAll; same {0} wildcard caveat applies).
void internal::DataChannelServiceImpl::unsubscribeAll() {
    unsigned char data[1024];
    int len = DataChannelControlUtil::packSubscriptionMessage(data, 1024, DataChannelControlCommands::CTLRequestUnsubscriptions, {0});
    sendDataIsolatedPacket((DataChannel::ID) 0x00, DataChannel::Types::CONTROL, data, len, &serverAddr);
}
// Dispatches a control-channel (channel 0) message received from the device.
// Only advertisement replies require action; everything else is ignored.
// Always returns 1.
int internal::DataChannelServiceImpl::handleChannel0Message(DataChannelMessage& message, sockaddr_in* sender) {
    const auto command = DataChannelControlUtil::getCommand(message.payload, message.header.payloadSize);
    if (command == DataChannelControlCommands::CTLProvideAdvertisement) {
        // Record the advertised channels for run-time availability checks.
        channelsAvailable = DataChannelControlUtil::unpackAdvertisementMessage(message.payload, message.header.payloadSize);
        for (auto& info: channelsAvailable) {
            channelsAvailableByType[info.getChannelType()].insert(info.getChannelID());
        }
        // Automatic subscribeAll is suitable for now
        subscribeAll();
    }
    // CTLProvideSubscriptions and all other commands require no action here.
    return 1;
}
// Convenience constructor: delegates to the IP-address constructor using the
// device's reported address.
internal::DataChannelServiceImpl::DataChannelServiceImpl(DeviceInfo deviceInfo)
    : DataChannelServiceImpl::DataChannelServiceImpl(deviceInfo.getIpAddress().c_str())
{}
// Sets up the device address for control traffic and validates the supplied
// IP string. Throws std::runtime_error if the address cannot be parsed.
internal::DataChannelServiceImpl::DataChannelServiceImpl(const char* ipAddress)
    : DataChannelServiceBase(), threadRunning(false) {
    // FIXED: zero the whole sockaddr_in first — the original left the
    // padding (sin_zero) indeterminate before the struct was handed to
    // sendto() via sendDataIsolatedPacket().
    std::memset(&serverAddr, 0, sizeof(serverAddr));
    serverAddr.sin_family = AF_INET;
    serverAddr.sin_port = htons(InternalInformation::DATACHANNELSERVICE_PORT);
    // inet_addr() returns INADDR_NONE for unparsable strings. (It also
    // rejects 255.255.255.255, which is not a meaningful unicast target.)
    auto result = inet_addr(ipAddress);
    if (result == INADDR_NONE) {
        throw std::runtime_error("Failed to set address for DataChannelService");
    }
    serverAddr.sin_addr.s_addr = result;
}
// Constructs the implementation and immediately starts its background
// receiver thread with the requested poll interval.
DataChannelService::DataChannelService(DeviceInfo deviceInfo, unsigned long pollDelayUSec) {
    pimpl = new DataChannelService::Pimpl(deviceInfo);
    pimpl->impl->launch(pollDelayUSec);
}
// Same as the DeviceInfo constructor, but connecting to an explicit IP address.
DataChannelService::DataChannelService(const char* ipAddress, unsigned long pollDelayUSec) {
    pimpl = new DataChannelService::Pimpl(ipAddress);
    pimpl->impl->launch(pollDelayUSec);
}
// Signals the background receiver thread to stop, then releases the pimpl.
// NOTE(review): the receiver thread is detached and may still be executing
// when the implementation is freed here; there is no join/handshake —
// confirm shutdown ordering is safe.
DataChannelService::~DataChannelService() {
    pimpl->impl->threadRunning = false;
    delete pimpl;
}
// The device provides IMU data iff a BNO080-type channel was advertised
// during the channel 0 handshake.
bool DataChannelService::imuAvailable() {
    const auto& byType = pimpl->impl->channelsAvailableByType;
    return byType.find(DataChannel::Types::BNO080) != byType.end();
}
// High-level IMU accessors (C++-98 compatible signatures)
// For devices not providing IMU data, these return placeholder defaults
// (the neutral values seeded in the ClientSideDataChannelIMUBNO080 constructor).
// Each thin wrapper forwards to the implementation, selecting the cache /
// ring buffer by the corresponding SH2 sensor ID.
TimestampedQuaternion DataChannelService::imuGetRotationQuaternion() {
    return pimpl->impl->getLastRotationQuaternion();
}
std::vector<TimestampedQuaternion> DataChannelService::imuGetRotationQuaternionSeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
    return pimpl->impl->getRotationQuaternionSeries(fromSec, fromUSec, untilSec, untilUSec);
}
// Vector-valued sensors (SH2 IDs 0x01..0x06)
TimestampedVector DataChannelService::imuGetAcceleration() {
    return pimpl->impl->getLastSensorVector(SH2Constants::SENSOR_ACCELEROMETER);
}
std::vector<TimestampedVector> DataChannelService::imuGetAccelerationSeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
    return pimpl->impl->getSensorVectorSeries(SH2Constants::SENSOR_ACCELEROMETER, fromSec, fromUSec, untilSec, untilUSec);
}
TimestampedVector DataChannelService::imuGetGyroscope() {
    return pimpl->impl->getLastSensorVector(SH2Constants::SENSOR_GYROSCOPE);
}
std::vector<TimestampedVector> DataChannelService::imuGetGyroscopeSeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
    return pimpl->impl->getSensorVectorSeries(SH2Constants::SENSOR_GYROSCOPE, fromSec, fromUSec, untilSec, untilUSec);
}
TimestampedVector DataChannelService::imuGetMagnetometer() {
    return pimpl->impl->getLastSensorVector(SH2Constants::SENSOR_MAGNETOMETER);
}
std::vector<TimestampedVector> DataChannelService::imuGetMagnetometerSeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
    return pimpl->impl->getSensorVectorSeries(SH2Constants::SENSOR_MAGNETOMETER, fromSec, fromUSec, untilSec, untilUSec);
}
TimestampedVector DataChannelService::imuGetLinearAcceleration() {
    return pimpl->impl->getLastSensorVector(SH2Constants::SENSOR_LINEAR_ACCELERATION);
}
std::vector<TimestampedVector> DataChannelService::imuGetLinearAccelerationSeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
    return pimpl->impl->getSensorVectorSeries(SH2Constants::SENSOR_LINEAR_ACCELERATION, fromSec, fromUSec, untilSec, untilUSec);
}
TimestampedVector DataChannelService::imuGetGravity() {
    return pimpl->impl->getLastSensorVector(SH2Constants::SENSOR_GRAVITY);
}
std::vector<TimestampedVector> DataChannelService::imuGetGravitySeries(int fromSec, int fromUSec, int untilSec, int untilUSec) {
    return pimpl->impl->getSensorVectorSeries(SH2Constants::SENSOR_GRAVITY, fromSec, fromUSec, untilSec, untilUSec);
}
} // visiontransfer namespace

View file

@ -0,0 +1,144 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_DATACHANNELSERVICE_H
#define VISIONTRANSFER_DATACHANNELSERVICE_H
#include <vector>
#include <visiontransfer/common.h>
#include <visiontransfer/deviceinfo.h>
#include <visiontransfer/sensordata.h>
using namespace visiontransfer;
namespace visiontransfer {
/**
* This is the DataChannelServer public API, backwards compatible with C++-98
*
* The DataChannelService encapsulates miscellaneous device services,
* which are not implemented on all Nerian devices. Availability can be
* queried; access to unavailable elements will normally return default
* values or silently execute nothing.
*
* The imuGet...() and envGet...() functions access an inertial measurement unit
* with attached environmental sensor, realized with the Hillcrest BNO080
* and Bosch BME280, respectively, on supported devices.
*
*/
class VT_EXPORT DataChannelService {
public:
    class Pimpl;
    // NOTE(review): the class holds a raw owning Pimpl* and declares no copy
    // suppression; copying would double-delete — consider deleting the copy
    // constructor/assignment.
    /**
     * Initialize a new background data channel service, connecting to the
     * specified device. The optional argument pollDelayUSec is used in the
     * background receive/update loop, the default of 1000us can be overridden
     * for the sake of efficiency or if minimum latency requirements differ.
     */
    DataChannelService(DeviceInfo deviceInfo, unsigned long pollDelayUSec=1000);
    /**
     * Initialize a new background data channel service, connecting to the
     * specified IP address. The optional argument pollDelayUSec is used in the
     * background receive/update loop, the default of 1000us can be overridden
     * for the sake of efficiency or if minimum latency requirements differ.
     */
    DataChannelService(const char* ipAddr, unsigned long pollDelayUSec=1000);
    ~DataChannelService();
public:
    /**
     * \brief Return whether the device will provide data from an Inertial Measurement Unit
     */
    bool imuAvailable();
    /**
     * \brief Return the most recent rotation quaternion, relative to gravity and magnetic north
     */
    TimestampedQuaternion imuGetRotationQuaternion();
    /**
     * \brief Return the current contents of the rotation quaternion data buffer, optionally between specified timestamps.
     *
     * This operation consumes an internal ring buffer up to the desired end stamp, data older than the desired window is silently discarded.
     */
    std::vector<TimestampedQuaternion> imuGetRotationQuaternionSeries(int fromSec=0, int fromUSec=0, int untilSec=0x7FFFffffl, int untilUSec=0x7FFFffffl);
    /**
     * \brief Return the most recent calibrated accelerometer reading
     */
    TimestampedVector imuGetAcceleration();
    /**
     * \brief Return the current contents of the calibrated accelerometer data buffer, optionally between specified timestamps.
     *
     * This operation consumes an internal ring buffer up to the desired end stamp, data older than the desired window is silently discarded.
     */
    std::vector<TimestampedVector> imuGetAccelerationSeries(int fromSec=0, int fromUSec=0, int untilSec=0x7FFFffffl, int untilUSec=0x7FFFffffl);
    /**
     * \brief Return the most recent calibrated angular accelerations from the gyroscope
     */
    TimestampedVector imuGetGyroscope();
    /**
     * \brief Return the current contents of the gyroscope data buffer, optionally between specified timestamps.
     *
     * This operation consumes an internal ring buffer up to the desired end stamp, data older than the desired window is silently discarded.
     */
    std::vector<TimestampedVector> imuGetGyroscopeSeries(int fromSec=0, int fromUSec=0, int untilSec=0x7FFFffffl, int untilUSec=0x7FFFffffl);
    /**
     * \brief Return the most recent magnetometer readings
     */
    TimestampedVector imuGetMagnetometer();
    /**
     * \brief Return the current contents of the magnetometer data buffer, optionally between specified timestamps.
     *
     * This operation consumes an internal ring buffer up to the desired end stamp, data older than the desired window is silently discarded.
     */
    std::vector<TimestampedVector> imuGetMagnetometerSeries(int fromSec=0, int fromUSec=0, int untilSec=0x7FFFffffl, int untilUSec=0x7FFFffffl);
    /**
     * \brief Return the most recent linear acceleration, i.e. with gravity factored out
     */
    TimestampedVector imuGetLinearAcceleration();
    /**
     * \brief Return the current contents of the linear acceleration (without gravity) data buffer, optionally between specified timestamps.
     *
     * This operation consumes an internal ring buffer up to the desired end stamp, data older than the desired window is silently discarded.
     */
    std::vector<TimestampedVector> imuGetLinearAccelerationSeries(int fromSec=0, int fromUSec=0, int untilSec=0x7FFFffffl, int untilUSec=0x7FFFffffl);
    /**
     * \brief Return the most recent gravity measurement
     */
    TimestampedVector imuGetGravity();
    /**
     * \brief Return the current contents of the gravity data buffer, optionally between specified timestamps.
     *
     * This operation consumes an internal ring buffer up to the desired end stamp, data older than the desired window is silently discarded.
     */
    std::vector<TimestampedVector> imuGetGravitySeries(int fromSec=0, int fromUSec=0, int untilSec=0x7FFFffffl, int untilUSec=0x7FFFffffl);
private:
    Pimpl* pimpl;  // owning; released in the destructor
};
} // namespaces
#endif

View file

@ -0,0 +1,135 @@
#include <sys/types.h>
#include <cstring>
#include <stdexcept>
#include <fcntl.h>
#include <fstream>
#include <visiontransfer/internalinformation.h>
#include <visiontransfer/networking.h>
#include <visiontransfer/datachannelservicebase.h>
#include <iostream>
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
// Creates a non-blocking UDP socket bound to the fixed data channel service
// port on all interfaces. Throws std::runtime_error on socket or bind failure.
DataChannelServiceBase::DataChannelServiceBase() {
    // Create socket
    if((dataChannelSocket = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
        throw std::runtime_error("Error creating data channel service socket!");
    }
    // Allow rebinding to the fixed port after a restart
    Networking::enableReuseAddress(dataChannelSocket, true);
    // Bind to port
    sockaddr_in localAddr;
    memset(&localAddr, 0, sizeof(localAddr));
    localAddr.sin_family = AF_INET;
    localAddr.sin_port = htons(InternalInformation::DATACHANNELSERVICE_PORT);
    localAddr.sin_addr.s_addr = htonl(INADDR_ANY);
    if(::bind(dataChannelSocket, (sockaddr *)&localAddr, sizeof(localAddr)) != 0) {
        throw std::runtime_error("Error binding dataChannel socket!");
    }
    // Non-blocking so that process() can poll without stalling the caller
    Networking::setSocketBlocking(dataChannelSocket, false);
}
// Releases the UDP socket acquired in the constructor.
DataChannelServiceBase::~DataChannelServiceBase() {
    Networking::closeSocket(dataChannelSocket);
}
// Drains all pending datagrams from the non-blocking socket, dispatching
// each to the channel 0 handler (channel type 0) or to the registered
// channel matching the message's channel ID. Malformed datagrams whose size
// does not match the declared payload size are logged and discarded.
// NOTE(review): the static locals make process() non-reentrant and unsafe
// when called concurrently or from multiple service instances — confirm a
// single poll thread is the only caller.
void DataChannelServiceBase::process() {
    static unsigned char buffer[100000];
    static sockaddr_in senderAddress;
    static socklen_t senderLength = (socklen_t) sizeof(senderAddress);
    int received;
    while (true) {
        // socket is non-blocking
        received = recvfrom(dataChannelSocket, (char*) buffer, sizeof(buffer), 0, (sockaddr *)&senderAddress, &senderLength);
        if ((received > 0) && ((unsigned)received >= sizeof(DataChannelMessageHeader))) {
            DataChannelMessageHeader* raw = reinterpret_cast<DataChannelMessageHeader*>(buffer);
            DataChannelMessage message;
            message.header.channelID = (DataChannel::ID) raw->channelID;
            message.header.channelType = (DataChannel::Type) raw->channelType;
            // payloadSize arrives in network byte order
            message.header.payloadSize = ntohl(raw->payloadSize);
            // Non-owning pointer into the static receive buffer
            message.payload = buffer + sizeof(DataChannelMessageHeader);
            if ((sizeof(DataChannelMessageHeader) + message.header.payloadSize) != (unsigned) received) {
                std::cerr << "DataChannelServiceBase: Size mismatch in UDP message, type " << (int) message.header.channelType << " ID " << (int) message.header.channelID << " - discarded!" << std::endl;
            } else {
                if (!(message.header.channelType)) {
                    handleChannel0Message(message, &senderAddress);
                } else {
                    // Try to find a matching registered channel to handle the message
                    auto it = channels.find(message.header.channelID);
                    if (it != channels.end()) {
                        it->second->handleMessage(message, &senderAddress);
                    }
                }
            }
        } else {
            break;
        }
        // Call channel process() iterations
        // NOTE(review): this runs only after an iteration that received a
        // datagram (the no-data path breaks above before reaching it) —
        // confirm channels do not rely on process() being called when the
        // socket is idle.
        for (auto& kv: channels) {
            kv.second->process();
        }
    }
}
// Actually send data, buffer must be stable
// Sends one fully assembled message (header + payload) to the recipient.
// Validates that the caller-supplied size matches the size declared in the
// message header (payloadSize is in network byte order in the buffer).
// Returns the byte count sent; throws std::runtime_error on any mismatch or
// send failure.
int DataChannelServiceBase::sendDataInternal(unsigned char* compiledMessage, unsigned int messageSize, sockaddr_in* recipient) {
    if (!recipient) throw std::runtime_error("Requested sendDataInternal without recipient address");
    if (messageSize < sizeof(DataChannelMessageHeader)) throw std::runtime_error("Message header too short");
    DataChannelMessageHeader* header = reinterpret_cast<DataChannelMessageHeader*>(compiledMessage);
    unsigned int reportedSize = sizeof(DataChannelMessageHeader) + ntohl(header->payloadSize);
    if (messageSize != reportedSize) throw std::runtime_error("Message size does not match");
    int result = 0;
    result = sendto(dataChannelSocket, (char*) compiledMessage, reportedSize, 0, (sockaddr*) recipient, sizeof(*recipient));
    if (result != (int) reportedSize) {
        // NOTE(review): strerror(errno) may not reflect socket errors on
        // Windows (WSAGetLastError) — confirm for the _WIN32 build.
        std::cerr << "Error sending DataChannel message to " << inet_ntoa(recipient->sin_addr) << ": " << strerror(errno) << std::endl;
        throw std::runtime_error("Error during sendto");
    }
    return result;
}
// Generate a new message and send it
// Assembles header + payload into one contiguous buffer and transmits it.
// Returns the byte count sent; propagates sendDataInternal's exceptions.
int DataChannelServiceBase::sendDataIsolatedPacket(DataChannel::ID id, DataChannel::Type type, unsigned char* data, unsigned int dataSize, sockaddr_in* recipient) {
    unsigned int msgSize = sizeof(DataChannelMessageHeader) + dataSize;
    // FIXED: use an RAII buffer instead of new[]/delete[] — the original
    // leaked the allocation whenever sendDataInternal() threw.
    std::vector<unsigned char> buf(msgSize, 0);
    DataChannelMessageHeader* header = reinterpret_cast<DataChannelMessageHeader*>(buf.data());
    header->channelID = id;
    header->channelType = type;
    header->payloadSize = htonl(dataSize); // wire format is network byte order
    std::memcpy(buf.data() + sizeof(DataChannelMessageHeader), data, dataSize);
    return sendDataInternal(buf.data(), msgSize, recipient);
}
// Registers a channel with this service and assigns its ID. Returns the
// assigned ID, or 0 if the ID is already taken or the channel's
// initialize() fails. Requires this service to be owned by a shared_ptr
// (uses shared_from_this()).
DataChannel::ID DataChannelServiceBase::registerChannel(std::shared_ptr<DataChannel> channel) {
    // Preliminary implementation: set id:=type (should allocate dynamic IDs later)
    DataChannel::ID id = (DataChannel::ID) channel->getChannelType();
    if (channels.count(id)) {
        return 0; // already registered this ID
    }
    // Checking dynamic init, if this fails the service is not registered (and will be auto cleaned)
    if (!channel->initialize()) return 0;
    channel->setChannelID(id);
    channels[id] = channel;
    channel->setService(shared_from_this());
    return id;
}
// Routes a payload through the owning service as an isolated packet on this
// channel. Returns the byte count sent, or 0 if the service is gone.
int DataChannel::sendData(unsigned char* data, unsigned int dataLen, sockaddr_in* recipient) {
    auto owningService = service.lock();
    if (!owningService) {
        return 0;
    }
    return owningService->sendDataIsolatedPacket(channelID, getChannelType(), data, dataLen, recipient);
}
}} // namespaces

View file

@ -0,0 +1,145 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <memory>
#include <map>
#include <set>
#include <string>
#include <vector>
#include <algorithm>
#ifdef _WIN32
#include <winsock2.h>
#else
#include <arpa/inet.h>
#endif
#ifndef VISIONTRANSFER_DATACHANNELSERVICEBASE_H
#define VISIONTRANSFER_DATACHANNELSERVICEBASE_H
namespace visiontransfer {
namespace internal {
/*
* This is a header file for internal use, please use the wrappers from datachannelservice.h
*/
#pragma pack(push,1)
/**
 * \brief Transport-level DataChannel header
 *
 * Packed to 1-byte alignment: this is the exact on-the-wire layout that
 * precedes every DataChannel payload. payloadSize travels in network byte
 * order (see the htonl/ntohl conversions at the send/receive sites).
 */
struct DataChannelMessageHeader {
uint8_t channelID;
uint8_t channelType;
uint32_t payloadSize;
};
// In-memory view of a received message: parsed header plus a pointer to the
// payload bytes. The payload is not owned by this struct.
struct DataChannelMessage {
DataChannelMessageHeader header;
unsigned char* payload;
};
#pragma pack(pop)
class DataChannelServiceBase;
/**
 * \brief Base class all data channel services derive from (once on the server side, once on the API side)
 */
class DataChannel {
public:
/**
 * \brief Known data channel service types, not all may be active on a specific device
 */
struct Types {
enum DataChannelTypesEnum {
CONTROL = 0x00,
BNO080 = 0x01,
UNDEFINED = 0xff
};
};
// Wire-level representations of channel type and channel ID (one byte each,
// matching DataChannelMessageHeader).
typedef unsigned char Type;
typedef unsigned char ID;
inline DataChannel(): infoString("RESERVED") { }
inline virtual ~DataChannel() {}
inline ID getChannelID() const { return channelID; }
inline std::string getInfoString() const { return infoString; }
// The service stores shared_ptrs to its channels; the channel only keeps a
// weak back-reference here, which avoids an ownership cycle.
inline void setService(std::weak_ptr<DataChannelServiceBase> serv) { service = serv; }
inline void setChannelID(ID id) { channelID = id; }
virtual Type getChannelType() const = 0;
/// \brief Channel-dependent message handlers in respective channel implementations
virtual int handleMessage(DataChannelMessage& message, sockaddr_in* sender) = 0;
/// \brief When initialize() implementations return false, the service will be deactivated
virtual bool initialize() = 0;
/// \brief startService() implementations can start devices, launch an IO-blocked worker thread etc.
virtual int startService() = 0;
/** \brief A single processing iteration; should be short and must not block.
 * Actual frequency determined by the thread calling DataChannelServiceBase::process()
 */
virtual bool process() { return true; }
virtual int stopService() = 0;
protected:
// Human-readable channel description, advertised via DataChannelInfo.
std::string infoString;
// Sends a payload through the owning service; returns 0 if the service is gone.
int sendData(unsigned char* data, unsigned int dataSize, sockaddr_in* recipient=nullptr);
private:
ID channelID;
std::weak_ptr<DataChannelServiceBase> service;
};
/**
 * \brief API-level data channel info for advertisements and subscription accounting
 *
 * Immutable value object: carries the ID, type and description of a channel
 * without any behavior of its own.
 */
class DataChannelInfo {
public:
inline DataChannelInfo(DataChannel::ID id, DataChannel::Type type, const std::string& info): channelID(id), channelType(type), infoString(info) { }
inline DataChannel::ID getChannelID() const { return channelID; }
inline DataChannel::Type getChannelType() const { return channelType; }
inline std::string getInfoString() const { return infoString; }
private:
DataChannel::ID channelID;
DataChannel::Type channelType;
std::string infoString;
};
/**
 * \brief Base class for the data service (background sending and receiving, dispatching to channels)
 *
 * Derives from enable_shared_from_this so registerChannel() can hand each
 * channel a weak back-reference; instances must therefore be managed by a
 * shared_ptr.
 */
class DataChannelServiceBase: public std::enable_shared_from_this<DataChannelServiceBase> {
public:
DataChannelServiceBase();
~DataChannelServiceBase();
// One processing iteration: drives the registered channels.
void process();
// Registers a channel; returns its assigned ID, or 0 on failure.
DataChannel::ID registerChannel(std::shared_ptr<DataChannel> channel);
// Sends an already-assembled message (header + payload) over the socket.
virtual int sendDataInternal(unsigned char* compiledMessage, unsigned int messageSize, sockaddr_in* recipient);
// Assembles a message for the given channel/type and sends it.
int sendDataIsolatedPacket(DataChannel::ID id, DataChannel::Type type, unsigned char* data, unsigned int dataSize, sockaddr_in* recipient);
// Handler for the reserved control channel (ID 0); side-specific.
virtual int handleChannel0Message(DataChannelMessage& message, sockaddr_in* sender) = 0;
protected:
// Registered channels, keyed by channel ID.
std::map<DataChannel::ID, std::shared_ptr<DataChannel> > channels;
#ifdef _WIN32
SOCKET dataChannelSocket;
#else
int dataChannelSocket;
#endif
// Scratch message used during reception/dispatch.
DataChannelMessage message;
};
}} // namespaces
#endif

View file

@ -0,0 +1,235 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <cstring>
#include "visiontransfer/deviceenumeration.h"
#include "visiontransfer/exceptions.h"
#include "visiontransfer/networking.h"
#include "visiontransfer/internalinformation.h"
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
/*************** Pimpl class containing all private members ***********/
// Private implementation: owns the UDP broadcast socket and the most recent
// discovery result (deviceList backs the pointer handed out by
// getDevicesPointer, so it must outlive the caller's use of that pointer).
class DeviceEnumeration::Pimpl {
public:
Pimpl();
~Pimpl();
DeviceInfo* getDevicesPointer(int* numDevices);
private:
// How long collectDiscoverResponses() waits for further replies (socket
// receive timeout, in milliseconds).
static constexpr int RESPONSE_WAIT_TIME_MS = 50;
SOCKET sock;
std::vector<DeviceInfo> deviceList;
std::vector<sockaddr_in> findBroadcastAddresses();
void sendDiscoverBroadcast();
DeviceEnumeration::DeviceList collectDiscoverResponses();
};
/******************** Stubs for all public members ********************/
// Public members only forward to the pimpl object (pimpl idiom keeps the
// class ABI stable).
DeviceEnumeration::DeviceEnumeration():
pimpl(new Pimpl()) {
// All initialization in the pimpl class
}
DeviceEnumeration::~DeviceEnumeration() {
delete pimpl;
}
DeviceInfo* DeviceEnumeration::getDevicesPointer(int* numDevices) {
return pimpl->getDevicesPointer(numDevices);
}
/******************** Implementation in pimpl class *******************/
// Creates the UDP socket used for discovery broadcasts, enables the
// SO_BROADCAST flag and configures short send/receive timeouts
// (RESPONSE_WAIT_TIME_MS) so collecting replies terminates quickly.
// Throws TransferException on socket creation / configuration failure.
DeviceEnumeration::Pimpl::Pimpl() {
    Networking::initNetworking();
    // Create socket
    if((sock = ::socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) == INVALID_SOCKET) {
        TransferException ex("Error creating broadcast socket: " + string(strerror(errno)));
        throw ex;
    }
    // Set broadcast flag
    int broadcastPermission = 1;
    if(setsockopt(sock, SOL_SOCKET, SO_BROADCAST, reinterpret_cast<char*>(&broadcastPermission),
            sizeof(broadcastPermission)) < 0) {
        // Close the socket before throwing: the destructor does not run when a
        // constructor exits via exception, so the descriptor would leak.
        close(sock);
        TransferException ex("Error setting socket broadcast flag: " + string(strerror(errno)));
        throw ex;
    }
    // Set sending and receive timeouts
#ifdef _WIN32
    unsigned int timeout = RESPONSE_WAIT_TIME_MS;
#else
    struct timeval timeout;
    timeout.tv_sec = 0;
    timeout.tv_usec = RESPONSE_WAIT_TIME_MS*1000;
#endif
    // Timeout configuration is best-effort; failures here are non-fatal.
    setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, reinterpret_cast<char*>(&timeout), sizeof(timeout));
    setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, reinterpret_cast<char*>(&timeout), sizeof(timeout));
}
DeviceEnumeration::Pimpl::~Pimpl() {
// NOTE(review): plain close() on a SOCKET — presumably the networking
// wrapper maps this appropriately on Windows (closesocket); verify.
close(sock);
}
// Runs one discovery round (broadcast + collect) and exposes the result as a
// raw pointer/count pair for ABI-stable consumption by the inline
// DeviceEnumeration::discoverDevices(). The pointed-to storage is owned by
// this object and is invalidated by the next call.
DeviceInfo* DeviceEnumeration::Pimpl::getDevicesPointer(int* numDevices) {
sendDiscoverBroadcast();
deviceList = collectDiscoverResponses();
// Convert vector to simple pointer
*numDevices = deviceList.size();
return deviceList.data();
}
// Sends the discovery datagram to every broadcast address of every local
// interface, on the well-known discovery port.
// Throws TransferException if a send fails or is truncated.
void DeviceEnumeration::Pimpl::sendDiscoverBroadcast() {
    std::vector<sockaddr_in> addresses = findBroadcastAddresses();
    for(sockaddr_in addr: addresses) {
        addr.sin_port = htons(InternalInformation::DISCOVERY_BROADCAST_PORT);
        if (sendto(sock, InternalInformation::DISCOVERY_BROADCAST_MSG,
                sizeof(InternalInformation::DISCOVERY_BROADCAST_MSG)-1, 0,
                (struct sockaddr *) &addr, sizeof(addr))
                != sizeof(InternalInformation::DISCOVERY_BROADCAST_MSG)-1) {
            // Use TransferException (with errno detail) like the rest of this
            // file, instead of a bare std::runtime_error without context.
            // TransferException derives from std::runtime_error, so existing
            // catch sites keep working.
            TransferException ex("Error sending broadcast message: " + string(strerror(errno)));
            throw ex;
        }
    }
}
// Collects discovery replies until the socket receive timeout expires
// (SO_RCVTIMEO was set to RESPONSE_WAIT_TIME_MS in the constructor, so the
// loop terminates when recvfrom reports an error with no pending datagram).
// Replies can be either full DiscoveryMessage or the smaller legacy
// DiscoveryMessageBasic; anything else is ignored.
DeviceEnumeration::DeviceList DeviceEnumeration::Pimpl::collectDiscoverResponses() {
DeviceList ret;
while(true) {
InternalInformation::DiscoveryMessage msg;
sockaddr_in senderAddress;
socklen_t senderLength = sizeof(senderAddress);
int received = recvfrom(sock, reinterpret_cast<char*>(&msg), sizeof(msg),
0, (sockaddr *)&senderAddress, &senderLength);
if(received < 0) {
// There are no more replies
break;
}
// Legacy senders transmit only the basic message layout.
bool isLegacy = received == sizeof(InternalInformation::DiscoveryMessageBasic);
if((received != sizeof(msg)) && !isLegacy ) {
// Invalid message
continue;
}
// Zero terminate version string
char fwVersion[sizeof(msg.firmwareVersion)+1];
memcpy(fwVersion, msg.firmwareVersion, sizeof(msg.firmwareVersion));
fwVersion[sizeof(msg.firmwareVersion)] = '\0';
// Legacy messages carry no health fields; status stays default (invalid).
DeviceStatus status;
if (!isLegacy) {
// Construct health status report
status = DeviceStatus(msg.lastFps, msg.jumboSize, msg.currentCaptureSource);
}
// Add to result list
DeviceInfo info(
inet_ntoa(senderAddress.sin_addr),
msg.useTcp ? DeviceInfo::PROTOCOL_TCP : DeviceInfo::PROTOCOL_UDP,
fwVersion,
(DeviceInfo::DeviceModel)msg.model,
msg.protocolVersion == InternalInformation::CURRENT_PROTOCOL_VERSION,
status
);
ret.push_back(info);
}
return ret;
}
// Enumerates the IPv4 broadcast addresses of all local interfaces.
// POSIX path: getifaddrs(); Windows path: GetIpAddrTable() with the broadcast
// address derived from each entry's address/netmask pair.
std::vector<sockaddr_in> DeviceEnumeration::Pimpl::findBroadcastAddresses() {
std::vector<sockaddr_in> ret;
#ifndef _WIN32
// BSD-style implementation
struct ifaddrs * ifap;
if (getifaddrs(&ifap) == 0) {
struct ifaddrs * p = ifap;
while(p) {
// ifa_dstaddr holds the broadcast address for broadcast-capable
// interfaces (per getifaddrs); skip entries without an IPv4 one.
if(p->ifa_dstaddr != nullptr && p->ifa_dstaddr->sa_family == AF_INET) {
ret.push_back(*reinterpret_cast<sockaddr_in*>(p->ifa_dstaddr));
}
p = p->ifa_next;
}
freeifaddrs(ifap);
}
#else
// Windows XP style implementation
// Adapted from example code at http://msdn2.microsoft.com/en-us/library/aa365917.aspx
// Now get Windows' IPv4 addresses table. We gotta call GetIpAddrTable()
// multiple times in order to deal with potential race conditions properly.
MIB_IPADDRTABLE* ipTable = nullptr;
ULONG bufLen = 0;
for (int i=0; i<5; i++) {
DWORD ipRet = GetIpAddrTable(ipTable, &bufLen, false);
if (ipRet == ERROR_INSUFFICIENT_BUFFER) {
// Grow the buffer to the size reported in bufLen and retry.
if(ipTable != nullptr) {
delete []reinterpret_cast<unsigned char*>(ipTable); // in case we had previously allocated it
}
ipTable = reinterpret_cast<MIB_IPADDRTABLE *>(new unsigned char[bufLen]);
memset(ipTable, 0, bufLen);
} else if (ipRet == NO_ERROR) {
break;
} else {
// Unexpected error: give up without a table.
if(ipTable != nullptr) {
delete []reinterpret_cast<unsigned char*>(ipTable);
}
break;
}
}
if (ipTable != nullptr) {
for (DWORD i=0; i<ipTable->dwNumEntries; i++) {
const MIB_IPADDRROW & row = ipTable->table[i];
uint32_t ipAddr = row.dwAddr;
uint32_t netmask = row.dwMask;
// Network address; host bits are set below if broadcast is enabled.
uint32_t baddr = ipAddr & netmask;
if (row.dwBCastAddr) {
baddr |= ~netmask;
}
sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = baddr;
ret.push_back(addr);
}
delete []reinterpret_cast<unsigned char*>(ipTable);
}
#endif
return ret;
}
} // namespace

View file

@ -0,0 +1,65 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_DEVICEENUMERATION_H
#define VISIONTRANSFER_DEVICEENUMERATION_H
#include <vector>
#include "visiontransfer/common.h"
#include "visiontransfer/deviceinfo.h"
namespace visiontransfer {
/**
 * \brief Allows for the discovery of devices in the network.
 *
 * Devices are discovered by transmitting a broad-cast message to all
 * network interfaces and then waiting for replies.
 */
class VT_EXPORT DeviceEnumeration {
public:
typedef std::vector<DeviceInfo> DeviceList;
DeviceEnumeration();
~DeviceEnumeration();
/**
 * \brief Discovers new devices and returns the list of all devices
 * that have been found
 * \return List of devices found
 *
 * Each call performs a fresh broadcast/collect round; the returned list
 * is a copy owned by the caller.
 */
DeviceList discoverDevices() {
// This code is inlined in order to provide binary compatibility with
// different STL implementations
int numDevices = 0;
DeviceInfo* devices = getDevicesPointer(&numDevices);
std::vector<DeviceInfo> ret(devices, &devices[numDevices]);
return ret;
}
private:
// We follow the pimpl idiom
class Pimpl;
Pimpl* pimpl;
// This class cannot be copied
DeviceEnumeration(const DeviceEnumeration& other);
DeviceEnumeration& operator=(const DeviceEnumeration&);
// Returns a pointer into pimpl-owned storage plus the element count.
DeviceInfo* getDevicesPointer(int* numDevices);
};
} // namespace
#endif

View file

@ -0,0 +1,176 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_DEVICEINFO_H
#define VISIONTRANSFER_DEVICEINFO_H
#include <string>
namespace visiontransfer {
/**
* \brief Representation of the current device status / health.
* Useful for addressing issues with peripherals or network transport.
*/
class DeviceStatus {
public:
    /// Creates an empty record; isValid() reports false.
    DeviceStatus() { }
    /// Creates a populated health record; isValid() reports true.
    DeviceStatus(double lastFps, unsigned int jumboSize, const std::string& currentCaptureSource)
        : lastFps(lastFps), jumboSize(jumboSize), currentCaptureSource(currentCaptureSource),
          validStatus(true) { }
    /// Whether this record carries actual reported data.
    bool isValid() const { return validStatus; }
    /// Most recent frames-per-second report (0.0 when unavailable).
    double getLastFps() const { return lastFps; }
    /// Configured jumbo MTU, or 0 when jumbo mode is disabled.
    unsigned int getJumboMtu() const { return jumboSize; }
    /// Non-zero when jumbo frames are enabled (i.e. jumbo MTU > 0).
    unsigned int getJumboFramesEnabled() const { return jumboSize > 0; }
    /// Capture source identifier, usable for targeted instructions.
    std::string getCurrentCaptureSource() const { return currentCaptureSource; }
private:
    double lastFps = 0.0;
    unsigned int jumboSize = 0;
    std::string currentCaptureSource = "";
    bool validStatus = false;
};
/**
 * \brief Aggregates information about a discovered device
 */
class DeviceInfo {
public:
    /// Models that discovery can report.
    enum DeviceModel {
        SCENESCAN,
        SCENESCAN_PRO,
        SCARLET
    };
    /// Transport protocol the device communicates over.
    enum NetworkProtocol {
        PROTOCOL_TCP,
        PROTOCOL_UDP
    };
    /**
     * \brief Constructs an empty object with default information
     */
    DeviceInfo()
        : ip(""), protocol(PROTOCOL_TCP), fwVersion(""), model(SCENESCAN), compatible(false) {
    }
    /**
     * \brief Constructs an object from the given discovery data.
     *
     * \param ip IP address of the discovered device.
     * \param protocol Network protocol of the discovered device.
     * \param fwVersion Firmware version as string.
     * \param model Model of the discovered device.
     * \param compatible Indicates if the device is compatible with this
     * API version.
     */
    DeviceInfo(const char* ip, NetworkProtocol protocol, const char* fwVersion,
            DeviceModel model, bool compatible)
        : ip(ip), protocol(protocol), fwVersion(fwVersion), model(model), compatible(compatible) {
    }
    /**
     * \brief Construct DeviceInfo with pre-initialized DeviceStatus field, for received health reports
     */
    DeviceInfo(const char* ip, NetworkProtocol protocol, const char* fwVersion,
            DeviceModel model, bool compatible, const DeviceStatus& status)
        : ip(ip), protocol(protocol), fwVersion(fwVersion), model(model), compatible(compatible),
          status(status) {
    }
    /// Returns the device IP address.
    std::string getIpAddress() const { return ip; }
    /// Returns the network protocol (\c PROTOCOL_TCP or \c PROTOCOL_UDP).
    NetworkProtocol getNetworkProtocol() const { return protocol; }
    /**
     * \brief Returns the firmware version string, typically consisting of
     * major, minor and patch version like "1.2.34" (special firmware
     * releases may deviate).
     */
    std::string getFirmwareVersion() const { return fwVersion; }
    /// Returns the model identifier (\c SCENESCAN, \c SCENESCAN_PRO or \c SCARLET).
    DeviceModel getModel() const { return model; }
    /// Returns the status / health record as reported by the device.
    DeviceStatus getStatus() const { return status; }
    /// Returns true if the device is compatible with this API version.
    bool isCompatible() const { return compatible; }
    /**
     * \brief Converts this object to a printable string, e.g. for
     * terminal output.
     */
    std::string toString() const {
        std::string modelName;
        switch(model) {
            case SCENESCAN_PRO: modelName = "SceneScan Pro"; break;
            case SCENESCAN:     modelName = "SceneScan";     break;
            case SCARLET:       modelName = "Scarlet";       break;
            default:            modelName = "Unknown";       break;
        }
        return ip + "; " + modelName + "; " + fwVersion + "; "
            + (compatible ? "compatible" : "incompatible");
    }
    /**
     * \brief Comparison operator for comparing two DeviceInfo objects.
     * The status / health record is not part of the comparison.
     */
    bool operator == (const DeviceInfo& other) const {
        return ip == other.ip && protocol == other.protocol && fwVersion == other.fwVersion
            && model == other.model && compatible == other.compatible;
    }
private:
    std::string ip;             // device IP address
    NetworkProtocol protocol;   // transport protocol
    std::string fwVersion;      // firmware version string
    DeviceModel model;          // reported device model
    bool compatible;            // protocol version matches this API
    DeviceStatus status;        // extended device status / health info
};
} // namespace
#endif

View file

@ -0,0 +1,261 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include "visiontransfer/deviceparameters.h"
#include "visiontransfer/parametertransfer.h"
#include "visiontransfer/exceptions.h"
#include "visiontransfer/common.h"
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
/*************** Pimpl class containing all private members ***********/
// Private implementation: wraps the ParameterTransfer connection and caches
// the server-side parameter enumeration (which carries each parameter's type).
class DeviceParameters::Pimpl {
public:
Pimpl(const DeviceInfo& device);
Pimpl(const char* address, const char* service);
int readIntParameter(int id);
double readDoubleParameter(int id);
bool readBoolParameter(int id);
void writeIntParameter(int id, int value);
void writeDoubleParameter(int id, double value);
void writeBoolParameter(int id, bool value);
std::map<std::string, ParameterInfo> getAllParameters();
// this template is selected for non-floating point arguments (i.e. int and bool).
// Overload selection: the trailing "..." (ellipsis) makes this the
// worst-ranked candidate, so it is only chosen when the enable_if
// overload below is removed from the set (non-floating T).
template<typename T>
void setParameter_impl(StandardParameterIDs::ParameterID id, ParameterInfo::ParameterType type, T value, ...)
{
int cid = static_cast<int>(id);
switch (type) {
case ParameterInfo::TYPE_INT: {
writeIntParameter(cid, static_cast<int>(value));
break;
}
case ParameterInfo::TYPE_BOOL: {
writeBoolParameter(cid, value != 0);
break;
}
case ParameterInfo::TYPE_DOUBLE: {
writeDoubleParameter(cid, static_cast<double>(value));
break;
}
}
}
// this template is selected for floating point arguments
template<typename T, typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
void setParameter_impl(StandardParameterIDs::ParameterID id, ParameterInfo::ParameterType type, T value, double)
{
int cid = static_cast<int>(id);
switch (type) {
case ParameterInfo::TYPE_DOUBLE: {
writeDoubleParameter(cid, value);
break;
}
case ParameterInfo::TYPE_INT: {
writeIntParameter(cid, static_cast<int>(value));
break;
}
case ParameterInfo::TYPE_BOOL: {
writeBoolParameter(cid, value != 0);
break;
}
}
}
// Dispatches to the right setParameter_impl overload; the dummy double{}
// argument binds to the "double" parameter (float path) or the ellipsis
// (integral path).
template <typename T>
void setParameter(StandardParameterIDs::ParameterID id, ParameterInfo::ParameterType type, T t) {
setParameter_impl<T>(id, type, t, double{});
}
ParameterInfo getParameter(const std::string& name);
// Resolves a parameter name to its ID and server-reported type; throws
// ParameterException for unknown/unreported names.
void lookupIDAndType(const std::string& name, internal::StandardParameterIDs::ParameterID& id, ParameterInfo::ParameterType& type);
private:
// Cache of the server-side parameter list, filled by getAllParameters().
std::map<std::string, ParameterInfo> serverSideEnumeration;
std::map<std::string, ParameterInfo> getAllParametersInternal();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template<typename T>
void setNamedParameterInternal(const std::string& name, T value);
#endif
ParameterTransfer paramTrans;
};
/******************** Stubs for all public members ********************/
// Public members only forward to the pimpl object (pimpl idiom keeps the
// class ABI stable).
DeviceParameters::DeviceParameters(const DeviceInfo& device):
pimpl(new Pimpl(device)) {
// All initialization in the pimpl class
}
DeviceParameters::DeviceParameters(const char* address, const char* service):
pimpl(new Pimpl(address, service)) {
// All initialization in the pimpl class
}
DeviceParameters::~DeviceParameters() {
delete pimpl;
}
int DeviceParameters::readIntParameter(int id) {
return pimpl->readIntParameter(id);
}
double DeviceParameters::readDoubleParameter(int id) {
return pimpl->readDoubleParameter(id);
}
bool DeviceParameters::readBoolParameter(int id) {
return pimpl->readBoolParameter(id);
}
void DeviceParameters::writeIntParameter(int id, int value) {
pimpl->writeIntParameter(id, value);
}
void DeviceParameters::writeDoubleParameter(int id, double value) {
pimpl->writeDoubleParameter(id, value);
}
void DeviceParameters::writeBoolParameter(int id, bool value) {
pimpl->writeBoolParameter(id, value);
}
// Resolves a parameter name to its numeric ID and its server-reported type.
// Fetches the server-side parameter list on first use (it carries the types).
// Throws ParameterException when the name is unknown or the server did not
// report the parameter.
void DeviceParameters::Pimpl::lookupIDAndType(const std::string& name, StandardParameterIDs::ParameterID& id, ParameterInfo::ParameterType& type) {
    if (serverSideEnumeration.empty()) {
        // get the server-side parameter list first (which reports the types as well)
        (void) getAllParameters();
    }
    id = StandardParameterIDs::getParameterIDForName(name);
    if (id == StandardParameterIDs::ParameterID::UNDEFINED) {
        throw ParameterException("Cannot access parameter with unknown name: " + name);
    }
    auto entry = serverSideEnumeration.find(name);
    if (entry == serverSideEnumeration.end()) {
        throw ParameterException("Server did not report the parameter in the supported list: " + name);
    }
    type = entry->second.getType();
}
// Fetches the complete server-side parameter list (name -> ParameterInfo)
// via the pimpl object, refreshing its cached enumeration.
std::map<std::string, ParameterInfo> DeviceParameters::getAllParameters()
{
return pimpl->getAllParameters();
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Explicit specializations for the supported value types. Each one resolves
// the name to (id, type) — which validates the name and may throw
// ParameterException — and then reads/writes through the pimpl object.
template<>
void VT_EXPORT DeviceParameters::setNamedParameter(const std::string& name, double value) {
StandardParameterIDs::ParameterID id;
ParameterInfo::ParameterType type;
pimpl->lookupIDAndType(name, id, type);
pimpl->setParameter<double>(id, type, value);
}
template<>
void VT_EXPORT DeviceParameters::setNamedParameter(const std::string& name, int value) {
StandardParameterIDs::ParameterID id;
ParameterInfo::ParameterType type;
pimpl->lookupIDAndType(name, id, type);
pimpl->setParameter<int>(id, type, value);
}
template<>
void VT_EXPORT DeviceParameters::setNamedParameter(const std::string& name, bool value) {
StandardParameterIDs::ParameterID id;
ParameterInfo::ParameterType type;
pimpl->lookupIDAndType(name, id, type);
pimpl->setParameter<bool>(id, type, value);
}
// The getters call lookupIDAndType only for name validation; the value
// itself comes from the cached ParameterInfo record.
template<>
int VT_EXPORT DeviceParameters::getNamedParameter(const std::string& name) {
StandardParameterIDs::ParameterID id;
ParameterInfo::ParameterType type;
pimpl->lookupIDAndType(name, id, type);
return pimpl->getParameter(name).getValue<int>();
}
template<>
double VT_EXPORT DeviceParameters::getNamedParameter(const std::string& name) {
StandardParameterIDs::ParameterID id;
ParameterInfo::ParameterType type;
pimpl->lookupIDAndType(name, id, type);
return pimpl->getParameter(name).getValue<double>();
}
template<>
bool VT_EXPORT DeviceParameters::getNamedParameter(const std::string& name) {
StandardParameterIDs::ParameterID id;
ParameterInfo::ParameterType type;
pimpl->lookupIDAndType(name, id, type);
return pimpl->getParameter(name).getValue<bool>();
}
#endif
/******************** Implementation in pimpl class *******************/
DeviceParameters::Pimpl::Pimpl(const char* address, const char* service)
: paramTrans(address, service) {
}
// "7683" is the parameter service port used when connecting by DeviceInfo.
DeviceParameters::Pimpl::Pimpl(const DeviceInfo& device)
: paramTrans(device.getIpAddress().c_str(), "7683") {
}
// Thin forwarders to the underlying ParameterTransfer connection.
int DeviceParameters::Pimpl::readIntParameter(int id) {
return paramTrans.readIntParameter(id);
}
double DeviceParameters::Pimpl::readDoubleParameter(int id) {
return paramTrans.readDoubleParameter(id);
}
bool DeviceParameters::Pimpl::readBoolParameter(int id) {
return paramTrans.readBoolParameter(id);
}
void DeviceParameters::Pimpl::writeIntParameter(int id, int value) {
paramTrans.writeIntParameter(id, value);
}
void DeviceParameters::Pimpl::writeDoubleParameter(int id, double value) {
paramTrans.writeDoubleParameter(id, value);
}
void DeviceParameters::Pimpl::writeBoolParameter(int id, bool value) {
paramTrans.writeBoolParameter(id, value);
}
// Fetches the server-side parameter list and refreshes the local cache used
// by lookupIDAndType()/getParameter().
std::map<std::string, ParameterInfo> DeviceParameters::Pimpl::getAllParameters() {
serverSideEnumeration = paramTrans.getAllParameters();
return serverSideEnumeration;
}
// Returns the cached ParameterInfo record for the given name.
// Throws ParameterException when the name is not in the cached enumeration.
// (The original used operator[], which silently inserted a default-constructed
// ParameterInfo into the cache for unknown names and returned that bogus
// entry. Existing callers run lookupIDAndType() first, which already throws
// for unreported names, so their behavior is unchanged.)
ParameterInfo DeviceParameters::Pimpl::getParameter(const std::string& name)
{
    auto it = serverSideEnumeration.find(name);
    if (it == serverSideEnumeration.end()) {
        throw ParameterException("Server did not report the parameter in the supported list: " + name);
    }
    return it->second;
}
} // namespace

View file

@ -0,0 +1,48 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_EXCEPTIONS_H
#define VISIONTRANSFER_EXCEPTIONS_H
#include <stdexcept>
namespace visiontransfer {
/**
 * \brief Exception class that is used for all protocol exceptions.
 */
class ProtocolException: public std::runtime_error {
public:
    // Message taken by const reference to avoid a needless copy
    // (source-compatible with the previous by-value signature).
    ProtocolException(const std::string& msg): std::runtime_error(msg) {}
};
/**
 * \brief Exception class that is used for all transfer exceptions.
 */
class TransferException: public std::runtime_error {
public:
    TransferException(const std::string& msg): std::runtime_error(msg) {}
};
/**
 * \brief Exception class that is used for all parameter-related exceptions.
 */
class ParameterException: public std::runtime_error {
public:
    ParameterException(const std::string& msg): std::runtime_error(msg) {}
};
} // namespace
#endif

View file

@ -0,0 +1,23 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_IMAGEPAIR_H
#define VISIONTRANSFER_IMAGEPAIR_H
#include "visiontransfer/imageset.h"
#pragma message "DEPRECATION NOTICE: imagepair.h and ImagePair are deprecated in favor of imageset.h and ImageSet"
#endif

View file

@ -0,0 +1,962 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <cstring>
#include <iostream>
#include <limits>
#include <vector>
#include <memory>
#include <algorithm>
#include "visiontransfer/imageprotocol.h"
#include "visiontransfer/alignedallocator.h"
#include "visiontransfer/datablockprotocol.h"
#include "visiontransfer/exceptions.h"
#include "visiontransfer/bitconversions.h"
#include "visiontransfer/internalinformation.h"
// Network headers
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <winsock2.h>
#else
#include <arpa/inet.h>
#endif
#define LOG_WARN(expr)
//#define LOG_WARN(expr) std::cerr << "DataBlockProtocol: " << expr << std::endl
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
/*************** Pimpl class containing all private members ***********/
// Private implementation ("pimpl" idiom) for ImageProtocol. Encodes image
// sets into network messages on top of DataBlockProtocol and decodes
// received messages back into image sets.
class ImageProtocol::Pimpl {
public:
    // Offset at which the image header is written inside headerBuffer;
    // leaves room for the underlying protocol's header preamble.
    static const int IMAGE_HEADER_OFFSET = sizeof(DataBlockProtocol::HeaderPreamble) + 10;

    Pimpl(bool server, ProtocolType protType, int maxUdpPacketSize);

    // Redeclaration of public members
    void setTransferImageSet(const ImageSet& imageSet);
    void setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& rawData,
        int firstTileWidth = 0, int middleTilesWidth = 0, int lastTileWidth = 0, int validBytes = 0x7FFFFFFF);
    void setRawValidBytes(const std::vector<int>& validBytesVec);
    const unsigned char* getTransferMessage(int& length);
    bool transferComplete();
    void resetTransfer();
    bool getReceivedImageSet(ImageSet& imageSet);
    bool getPartiallyReceivedImageSet(ImageSet& imageSet,
        int& validRows, bool& complete);
    bool imagesReceived() const;
    unsigned char* getNextReceiveBuffer(int& maxLength);
    void processReceivedMessage(int length);
    int getProspectiveMessageSize();
    int getNumDroppedFrames() const;
    void resetReception();
    bool isConnected() const;
    const unsigned char* getNextControlMessage(int& length);
    bool newClientConnected();
    std::string statusReport();

private:
    // Magic number identifying an image header packet on the wire.
    // NOTE: the historical "SEQUECE" spelling is kept; renaming would
    // touch every use site.
    unsigned short MAGIC_SEQUECE = 0x3D15;

    // Header data transferred in the first packet.
    // Packed to 1-byte alignment: these structs mirror the exact wire layout.
#pragma pack(push,1)
    struct HeaderDataLegacy {
        unsigned short magic;
        unsigned char protocolVersion;
        unsigned char isRawImagePair_OBSOLETE;
        unsigned short width;
        unsigned short height;
        unsigned short firstTileWidth;
        unsigned short lastTileWidth;
        unsigned char format0;
        unsigned char format1;
        unsigned short minDisparity;
        unsigned short maxDisparity;
        unsigned char subpixelFactor;
        unsigned int seqNum;
        int timeSec;
        int timeMicrosec;
        float q[16];
        unsigned short middleTilesWidth;
    };
    // Header data v2: extensible and forwards-compatible
    struct HeaderDataV2: public HeaderDataLegacy {
        unsigned short totalHeaderSize;
        unsigned short flags;
        unsigned char numberOfImages;
        unsigned char format2;
        enum FlagBits {
            NEW_STYLE_TRANSFER = 1,
            HEADER_V3 = 2,
            HEADER_V4 = 4,
            // future protocol extensions should mark a new bit here
        };
    };
    // Header data v3, adds arbitrary image channel assignments
    struct HeaderDataV3: public HeaderDataV2 {
        // HEADER_V3 bit implies that this extension is present,
        // declaring arbitrary channel roles for each of numberOfImages active channels.
        // If not present, is it an old sender that always sends two images
        // (channel 0: left, channel 1: right or disparity (if active))
        unsigned char imageTypes[8];
    };
    // Header data v4, adds exposure time and sync pulse
    struct HeaderData: public HeaderDataV3 {
        int exposureTime; // exposure time in microseconds
        int lastSyncPulseSec;
        int lastSyncPulseMicrosec;
    };
#pragma pack(pop)

    // Underlying protocol for data transfers
    DataBlockProtocol dataProt;
    ProtocolType protType;

    // Transfer related variables
    // Scratch buffer holding the outgoing transmission header.
    std::vector<unsigned char> headerBuffer;

    // Reception related variables
    // Per-channel buffers for 12-bit unpacking / tile reassembly.
    std::vector<unsigned char, AlignedAllocator<unsigned char> >decodeBuffer[ImageSet::MAX_SUPPORTED_IMAGES];
    bool receiveHeaderParsed;
    HeaderData receiveHeader;
    // Payload byte counts seen at the previous decode call; used to decode
    // only newly arrived rows incrementally.
    int lastReceivedPayloadBytes[ImageSet::MAX_SUPPORTED_IMAGES];
    bool receptionDone;

    // Copies the transmission header to the given buffer
    void copyHeaderToBuffer(const ImageSet& imageSet, int firstTileWidth,
        int middleTilesWidth, int lastTileWidth, unsigned char* buffer);

    // Decodes header information from the received data
    void tryDecodeHeader(const unsigned char* receivedData, int receivedBytes);

    // Decodes a received image from a non-interleaved buffer
    unsigned char* decodeNoninterleaved(int imageNumber, int numImages, int receivedBytes,
        unsigned char* data, int& validRows, int& rowStride);

    // Decodes a received image from an interleaved buffer
    unsigned char* decodeInterleaved(int imageNumber, int numImages, int receivedBytes,
        unsigned char* data, int& validRows, int& rowStride);
    int getNumTiles(int width, int firstTileWidth, int middleTilesWidth, int lastTileWidth);
    int getFrameSize(int width, int height, int firstTileWidth, int middleTilesWidth,
        int lastTileWidth, int totalBits);
    int getFormatBits(ImageSet::ImageFormat format, bool afterDecode);
    void decodeTiledImage(int imageNumber, int lastReceivedPayloadBytes, int receivedPayloadBytes,
        const unsigned char* data, int firstTileStride, int middleTilesStride, int lastTileStride,
        int& validRows, ImageSet::ImageFormat format, bool dataIsInterleaved);
    void decodeRowsFromTile(int startRow, int stopRow, unsigned const char* src,
        unsigned char* dst, int srcStride, int dstStride, int tileWidth);
    void allocateDecodeBuffer(int imageNumber);
};
/******************** Stubs for all public members ********************/
// Constructs the protocol handler. All state lives in the Pimpl instance.
ImageProtocol::ImageProtocol(bool server, ProtocolType protType, int maxUdpPacketSize)
    : pimpl(new Pimpl(server, protType, maxUdpPacketSize)) {
    // All initializations are done by the Pimpl class
}

ImageProtocol::~ImageProtocol() {
    delete pimpl;
}

// The remaining public members are thin forwarders to the Pimpl instance;
// see the Pimpl implementations below for documentation.
void ImageProtocol::setTransferImageSet(const ImageSet& imageSet) {
    pimpl->setTransferImageSet(imageSet);
}

void ImageProtocol::setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& imageData,
        int firstTileWidth, int middleTilesWidth, int lastTileWidth, int validBytes) {
    pimpl->setRawTransferData(metaData, imageData, firstTileWidth, middleTilesWidth, lastTileWidth, validBytes);
}

void ImageProtocol::setRawValidBytes(const std::vector<int>& validBytesVec) {
    pimpl->setRawValidBytes(validBytesVec);
}

const unsigned char* ImageProtocol::getTransferMessage(int& length) {
    return pimpl->getTransferMessage(length);
}

bool ImageProtocol::transferComplete() {
    return pimpl->transferComplete();
}

void ImageProtocol::resetTransfer() {
    pimpl->resetTransfer();
}

bool ImageProtocol::getReceivedImageSet(ImageSet& imageSet) {
    return pimpl->getReceivedImageSet(imageSet);
}

bool ImageProtocol::getPartiallyReceivedImageSet(
        ImageSet& imageSet, int& validRows, bool& complete) {
    return pimpl->getPartiallyReceivedImageSet(imageSet, validRows, complete);
}

bool ImageProtocol::imagesReceived() const {
    return pimpl->imagesReceived();
}

unsigned char* ImageProtocol::getNextReceiveBuffer(int& maxLength) {
    return pimpl->getNextReceiveBuffer(maxLength);
}

void ImageProtocol::processReceivedMessage(int length) {
    pimpl->processReceivedMessage(length);
}

int ImageProtocol::getNumDroppedFrames() const {
    return pimpl->getNumDroppedFrames();
}

void ImageProtocol::resetReception() {
    pimpl->resetReception();
}

bool ImageProtocol::isConnected() const {
    return pimpl->isConnected();
}

const unsigned char* ImageProtocol::getNextControlMessage(int& length) {
    return pimpl->getNextControlMessage(length);
}

bool ImageProtocol::newClientConnected() {
    return pimpl->newClientConnected();
}
/******************** Implementation in pimpl class *******************/
// Initializes the underlying data block protocol and zeroes all
// transfer/reception state.
ImageProtocol::Pimpl::Pimpl(bool server, ProtocolType protType, int maxUdpPacketSize)
    :dataProt(server, (DataBlockProtocol::ProtocolType)protType,
        maxUdpPacketSize), protType(protType),
        receiveHeaderParsed(false), lastReceivedPayloadBytes{0},
        receptionDone(false) {
    // Room for the header plus slack for protocol framing.
    headerBuffer.resize(sizeof(HeaderData) + 128);
    // Bug fix: the previous code passed sizeof(headerBuffer.size()), i.e.
    // sizeof(size_t), and therefore only cleared the first few bytes of the
    // buffer. Clear the entire buffer.
    memset(&headerBuffer[0], 0, headerBuffer.size());
    memset(&receiveHeader, 0, sizeof(receiveHeader));
}
// Prepares the transfer of a complete image set: writes the transmission
// header, registers per-channel transfer sizes with DataBlockProtocol, and
// re-packs 12-bit channels into the packed wire format.
// Throws ProtocolException if any image has no pixel data.
void ImageProtocol::Pimpl::setTransferImageSet(const ImageSet& imageSet) {
    for (int i=0; i<imageSet.getNumberOfImages(); ++i) {
        if(imageSet.getPixelData(i) == nullptr) {
            throw ProtocolException("Image data is null pointer!");
        }
    }

    // Set header as first piece of data
    copyHeaderToBuffer(imageSet, 0, 0, 0, &headerBuffer[IMAGE_HEADER_OFFSET]);
    dataProt.resetTransfer();
    int numTransferBlocks = imageSet.getNumberOfImages();
    dataProt.setTransferHeader(&headerBuffer[IMAGE_HEADER_OFFSET], sizeof(HeaderData), numTransferBlocks);
    for (int i=0; i<imageSet.getNumberOfImages(); ++i) {
        // Untiled transfer: all tile widths are passed as 0.
        int bits = getFormatBits(imageSet.getPixelFormat(i), false);
        int rawDataLength = getFrameSize(imageSet.getWidth(), imageSet.getHeight(), 0, 0, 0, bits);
        dataProt.setTransferBytes(i, rawDataLength);
    }

    // Perform 12 bit packed encoding if necessary
    int bits[ImageSet::MAX_SUPPORTED_IMAGES] = {0};
    int rowSize[ImageSet::MAX_SUPPORTED_IMAGES] = {0};
    const unsigned char* pixelData[ImageSet::MAX_SUPPORTED_IMAGES] = {nullptr};
    // NOTE(review): encodingBuffer is function-local, yet pointers into it
    // are registered with dataProt below. This only works if DataBlockProtocol
    // copies or fully consumes the data before this function returns — TODO
    // confirm against DataBlockProtocol::setTransferData.
    std::vector<unsigned char> encodingBuffer[ImageSet::MAX_SUPPORTED_IMAGES];

    for(int i = 0; i<imageSet.getNumberOfImages(); i++) {
        bits[i] = getFormatBits(imageSet.getPixelFormat(i), false);
        rowSize[i] = imageSet.getWidth()*bits[i]/8;

        if(imageSet.getPixelFormat(i) != ImageSet::FORMAT_12_BIT_MONO) {
            // 8-bit mono / RGB channels are transferred as-is.
            pixelData[i] = imageSet.getPixelData(i);
        } else {
            // 12-bit channels are re-packed into a temporary buffer first.
            encodingBuffer[i].resize(rowSize[i] * imageSet.getHeight());
            BitConversions::encode12BitPacked(0, imageSet.getHeight(), imageSet.getPixelData(i),
                &encodingBuffer[i][0], imageSet.getRowStride(i), rowSize[i], imageSet.getWidth());
            pixelData[i] = &encodingBuffer[i][0];
        }
    }

    for (int i=0; i<imageSet.getNumberOfImages(); ++i) {
        dataProt.setTransferData(i, const_cast<unsigned char*>(pixelData[i])); // these are always reserved memory or untile buffers
    }
}
// Prepares a transfer from already wire-formatted (possibly tiled) pixel
// buffers. metaData supplies all image parameters; rawData supplies one
// pre-formatted buffer per channel. Throws ProtocolException if the buffer
// count does not match the metadata.
void ImageProtocol::Pimpl::setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& rawData,
        int firstTileWidth, int middleTilesWidth, int lastTileWidth, int validBytes) {
    if(static_cast<int>(rawData.size()) != metaData.getNumberOfImages()) {
        throw ProtocolException("Mismatch between metadata and number of image buffers!");
    }

    // Set header as first piece of data
    copyHeaderToBuffer(metaData, firstTileWidth, middleTilesWidth, lastTileWidth, &headerBuffer[IMAGE_HEADER_OFFSET]);
    dataProt.resetTransfer();
    int numTransferBlocks = metaData.getNumberOfImages();
    dataProt.setTransferHeader(&headerBuffer[IMAGE_HEADER_OFFSET], sizeof(HeaderData), numTransferBlocks);
    // Now set the size per channel (replaces old final size argument to setTransferHeader())
    for (int i=0; i<metaData.getNumberOfImages(); ++i) {
        int rawDataLength = getFrameSize(metaData.getWidth(), metaData.getHeight(),
            firstTileWidth, middleTilesWidth, lastTileWidth, metaData.getBitsPerPixel(i));
        dataProt.setTransferBytes(i, rawDataLength);
    }

    for (int i=0; i<metaData.getNumberOfImages(); ++i) {
        dataProt.setTransferData(i, rawData[i]);
    }
}
// Reports, per channel, how many bytes of the raw transfer buffers are
// currently valid (used for incremental / streaming transfers).
void ImageProtocol::Pimpl::setRawValidBytes(const std::vector<int>& validBytesVec) {
    int channel = 0;
    for (int bytes : validBytesVec) {
        dataProt.setTransferValidBytes(channel, bytes);
        ++channel;
    }
}
// Fetches the next encoded network message from the underlying protocol.
// If the first attempt yields nothing, one retry is made (the first call may
// merely have finished the current block); returns nullptr if there is still
// no message pending.
const unsigned char* ImageProtocol::Pimpl::getTransferMessage(int& length) {
    const unsigned char* message = dataProt.getTransferMessage(length);
    if(message != nullptr) {
        return message;
    }
    // Retry once, matching the original behavior.
    return dataProt.getTransferMessage(length);
}
// Returns true once the underlying protocol has sent all pending data.
bool ImageProtocol::Pimpl::transferComplete() {
    return dataProt.transferComplete();
}
// Computes how many vertical tiles a transfer of the given width consists of.
// A lastTileWidth of 0 means the transfer is untiled; a middleTilesWidth of 0
// means there are exactly two tiles (first + last).
int ImageProtocol::Pimpl::getNumTiles(int width, int firstTileWidth, int middleTilesWidth, int lastTileWidth) {
    if(lastTileWidth == 0) {
        // Untiled transfer
        return 1;
    }
    if(middleTilesWidth == 0) {
        // Only a first and a last tile
        return 2;
    }
    // First and last tile plus however many middle tiles fill the remainder.
    // This is the exact algebraic simplification of the original expression
    // (width - 2*(f+l-m) + f + l) / (f + l - (f+l-m)).
    return (width - firstTileWidth - lastTileWidth + 2*middleTilesWidth) / middleTilesWidth;
}
// Returns the transfer size in bytes for one channel of a full frame.
// NOTE(review): the tile-width parameters are currently unused — the size is
// computed from the overall frame dimensions only. Presumably tiling adds no
// padding bytes; confirm against the sender-side tiling code.
int ImageProtocol::Pimpl::getFrameSize(int width, int height, int firstTileWidth,
        int middleTilesWidth, int lastTileWidth, int totalBits) {
    return (width * height * totalBits) /8;
}
// Returns the bits per pixel for the given format: the unpacked in-memory
// size when afterDecode is true (12-bit mono expands to 16 bits), or the
// packed on-the-wire size otherwise. Throws ProtocolException for unknown
// formats.
int ImageProtocol::Pimpl::getFormatBits(ImageSet::ImageFormat format, bool afterDecode) {
    if(afterDecode) {
        return ImageSet::getBytesPerPixel(format)*8;
    }
    if(format == ImageSet::FORMAT_8_BIT_MONO) {
        return 8;
    }
    if(format == ImageSet::FORMAT_12_BIT_MONO) {
        return 12;
    }
    if(format == ImageSet::FORMAT_8_BIT_RGB) {
        return 24;
    }
    throw ProtocolException("Illegal pixel format!");
}
// Serializes the transmission header for the given image set into the
// provided buffer (wire layout = packed HeaderData). Multi-byte fields are
// converted to network byte order. Throws std::runtime_error if the channel
// role assignments do not cover all images.
void ImageProtocol::Pimpl::copyHeaderToBuffer(const ImageSet& imageSet,
        int firstTileWidth, int middleTilesWidth, int lastTileWidth, unsigned char* buffer) {
    int timeSec = 0, timeMicrosec = 0;
    HeaderData* transferHeader = reinterpret_cast<HeaderData*>(buffer);
    memset(transferHeader, 0, sizeof(*transferHeader));
    transferHeader->magic = htons(MAGIC_SEQUECE);
    transferHeader->protocolVersion = InternalInformation::CURRENT_PROTOCOL_VERSION;
    transferHeader->isRawImagePair_OBSOLETE = 0;
    transferHeader->width = htons(imageSet.getWidth());
    transferHeader->height = htons(imageSet.getHeight());
    transferHeader->firstTileWidth = htons(firstTileWidth);
    transferHeader->lastTileWidth = htons(lastTileWidth);
    transferHeader->middleTilesWidth = htons(middleTilesWidth);
    transferHeader->format0 = static_cast<unsigned char>(imageSet.getPixelFormat(0));
    transferHeader->format1 = (imageSet.getNumberOfImages() <= 1) ? 0 : static_cast<unsigned char>(imageSet.getPixelFormat(1));
    transferHeader->seqNum = static_cast<unsigned int>(htonl(imageSet.getSequenceNumber()));
    transferHeader->format2 = (imageSet.getNumberOfImages() <= 2) ? 0 : static_cast<unsigned char>(imageSet.getPixelFormat(2));
    transferHeader->numberOfImages = static_cast<unsigned char>(imageSet.getNumberOfImages());
    transferHeader->exposureTime = htonl(imageSet.getExposureTime());
    imageSet.getLastSyncPulse(timeSec, timeMicrosec);
    transferHeader->lastSyncPulseSec = htonl(timeSec);
    transferHeader->lastSyncPulseMicrosec = htonl(timeMicrosec);
    // Self-reported header size and extension flags allow forward-compatible
    // parsing on the receiver side.
    transferHeader->totalHeaderSize = htons(sizeof(HeaderData));
    transferHeader->flags = htons(HeaderData::FlagBits::NEW_STYLE_TRANSFER | HeaderData::FlagBits::HEADER_V3
        | HeaderData::FlagBits::HEADER_V4);

    int minDisp = 0, maxDisp = 0;
    imageSet.getDisparityRange(minDisp, maxDisp);
    // NOTE(review): minDisparity/maxDisparity (and subpixelFactor) are
    // written without byte-order conversion; tryDecodeHeader() does not
    // convert them either, so the omission cancels out between peers of the
    // same endianness — confirm this is intentional before changing.
    transferHeader->minDisparity = minDisp;
    transferHeader->maxDisparity = maxDisp;
    transferHeader->subpixelFactor = imageSet.getSubpixelFactor();

    imageSet.getTimestamp(timeSec, timeMicrosec);
    transferHeader->timeSec = static_cast<int>(htonl(static_cast<unsigned int>(timeSec)));
    transferHeader->timeMicrosec = static_cast<int>(htonl(static_cast<unsigned int>(timeMicrosec)));

    // Fill the channel-role table (header v3): every slot defaults to
    // undefined, then each present role records its channel index.
    int numImageChannels = 0;
    for (int i=0; i<(int) sizeof(transferHeader->imageTypes); ++i) {
        transferHeader->imageTypes[i] = static_cast<unsigned char>(ImageSet::ImageType::IMAGE_UNDEFINED);
    }
    int idx = imageSet.getIndexOf(ImageSet::ImageType::IMAGE_LEFT);
    if (idx>=0) {
        transferHeader->imageTypes[idx] = static_cast<unsigned char>(ImageSet::ImageType::IMAGE_LEFT);
        numImageChannels++;
    }
    idx = imageSet.getIndexOf(ImageSet::ImageType::IMAGE_RIGHT);
    if (idx>=0) {
        transferHeader->imageTypes[idx] = static_cast<unsigned char>(ImageSet::ImageType::IMAGE_RIGHT);
        numImageChannels++;
    }
    idx = imageSet.getIndexOf(ImageSet::ImageType::IMAGE_DISPARITY);
    if (idx>=0) {
        transferHeader->imageTypes[idx] = static_cast<unsigned char>(ImageSet::ImageType::IMAGE_DISPARITY);
        numImageChannels++;
    }
    if (numImageChannels != imageSet.getNumberOfImages()) {
        throw std::runtime_error("Mismatch between reported number of images and enabled channel selection!");
    }

    if(imageSet.getQMatrix() != nullptr) {
        memcpy(transferHeader->q, imageSet.getQMatrix(), sizeof(float)*16);
    }
}
// Aborts / restarts the current outgoing transfer.
void ImageProtocol::Pimpl::resetTransfer() {
    dataProt.resetTransfer();
}
// Returns the buffer into which the next network message should be received;
// maxLength is set to the maximum number of bytes that may be written.
unsigned char* ImageProtocol::Pimpl::getNextReceiveBuffer(int& maxLength) {
    maxLength = dataProt.getMaxReceptionSize();
    return dataProt.getNextReceiveBuffer(maxLength);
}
// Feeds one received network message (of the given length) into the
// underlying protocol and, if the header has not been parsed yet, attempts
// to decode it. Resets reception if the protocol lost its header state.
void ImageProtocol::Pimpl::processReceivedMessage(int length) {
    receptionDone = false;

    // Add the received message
    dataProt.processReceivedMessage(length, receptionDone);
    if(!dataProt.wasHeaderReceived() && receiveHeaderParsed) {
        // Something went wrong. We need to reset!
        LOG_WARN("Resetting image protocol!");
        resetReception();
        return;
    }

    int receivedBytes = 0;
    dataProt.getReceivedData(receivedBytes);

    // Immediately try to decode the header
    if(!receiveHeaderParsed) {
        int headerLen = 0;
        unsigned char* headerData = dataProt.getReceivedHeader(headerLen);
        if(headerData != nullptr) {
            tryDecodeHeader(headerData, headerLen);
        }
    }
}
// Attempts to parse a received header packet into receiveHeader, converting
// multi-byte fields to host byte order. Silently returns if the data is too
// short or lacks the magic number; throws ProtocolException on a protocol
// version mismatch. Sets receiveHeaderParsed on success.
void ImageProtocol::Pimpl::tryDecodeHeader(const
        unsigned char* receivedData, int receivedBytes) {
    // Extra data fields that have been added to the header. Must be
    // removed when the protocol version number is updated
    constexpr int optionalDataSize = sizeof(receiveHeader.middleTilesWidth);
    constexpr int mandatoryDataSize = static_cast<int>(sizeof(HeaderDataLegacy)) - optionalDataSize;
    constexpr int fullyExtensibleHeaderSize = static_cast<int>(sizeof(HeaderDataV2));
    bool isCompleteHeader = false;

    if(receivedBytes >= mandatoryDataSize) {
        if (receivedBytes < fullyExtensibleHeaderSize) {
            // Legacy header: only the legacy prefix is valid.
            *(static_cast<HeaderDataLegacy*>(&receiveHeader)) = *reinterpret_cast<const HeaderDataLegacy*>(receivedData);
        } else {
            // Copy only as many bytes as were actually received; the sender
            // may use a shorter (older) header revision than our HeaderData.
            // Bug fix: the previous code additionally dereferenced the buffer
            // as a full HeaderData, reading past the end of the received data
            // whenever receivedBytes < sizeof(HeaderData). That overread has
            // been removed; the length-clamped memcpy is sufficient.
            memcpy(&receiveHeader, receivedData, std::min((size_t)receivedBytes, sizeof(HeaderData)));
            isCompleteHeader = true;
        }
        if(receiveHeader.magic != htons(MAGIC_SEQUECE)) {
            // Let's not call this an error. Perhaps it's just not a header
            // packet
            return;
        }

        if(receiveHeader.protocolVersion != InternalInformation::CURRENT_PROTOCOL_VERSION) {
            throw ProtocolException("Protocol version mismatch!");
        }

        // Convert byte order
        receiveHeader.width = ntohs(receiveHeader.width);
        receiveHeader.height = ntohs(receiveHeader.height);
        receiveHeader.firstTileWidth = ntohs(receiveHeader.firstTileWidth);
        receiveHeader.lastTileWidth = ntohs(receiveHeader.lastTileWidth);
        receiveHeader.timeSec = static_cast<int>(
            ntohl(static_cast<unsigned int>(receiveHeader.timeSec)));
        receiveHeader.timeMicrosec = static_cast<int>(
            ntohl(static_cast<unsigned int>(receiveHeader.timeMicrosec)));
        receiveHeader.seqNum = ntohl(receiveHeader.seqNum);

        // Optional data items
        if(receivedBytes >= mandatoryDataSize + optionalDataSize) {
            receiveHeader.middleTilesWidth = ntohs(receiveHeader.middleTilesWidth);
        } else {
            receiveHeader.middleTilesWidth = 0;
        }
        if (isCompleteHeader) {
            // This is a header of v2 or above, which self-reports its extension level in the flags field
            receiveHeader.totalHeaderSize = ntohs(receiveHeader.totalHeaderSize);
            receiveHeader.flags = ntohs(receiveHeader.flags);
            receiveHeader.exposureTime = ntohl(receiveHeader.exposureTime);
            // Consistency fix: converted with ntohl (previously htonl) — the
            // two perform the identical byte swap on all supported platforms,
            // but ntohl states the receive-side intent correctly.
            receiveHeader.lastSyncPulseSec = ntohl(receiveHeader.lastSyncPulseSec);
            receiveHeader.lastSyncPulseMicrosec = ntohl(receiveHeader.lastSyncPulseMicrosec);
        } else {
            // Infer missing fields for legacy compatibility transfers
            receiveHeader.totalHeaderSize = (receivedBytes <= mandatoryDataSize) ? mandatoryDataSize : static_cast<int>(sizeof(HeaderDataLegacy));
            receiveHeader.flags = 0;
            receiveHeader.numberOfImages = 2;
            receiveHeader.format2 = 0;
            receiveHeader.exposureTime = 0;
            receiveHeader.lastSyncPulseSec = 0;
            receiveHeader.lastSyncPulseMicrosec = 0;
        }
        receiveHeaderParsed = true;
    }
}
// Returns true once the header has been parsed and all payload has arrived.
bool ImageProtocol::Pimpl::imagesReceived() const {
    return receptionDone && receiveHeaderParsed;
}
// Retrieves the received image set, but only once it is fully complete.
// Returns false if the set is missing or still partial.
bool ImageProtocol::Pimpl::getReceivedImageSet(ImageSet& imageSet) {
    int validRows = 0;
    bool complete = false;
    if (!getPartiallyReceivedImageSet(imageSet, validRows, complete)) {
        return false;
    }
    return complete;
}
// Populates imageSet from whatever pixel data has arrived so far.
// Returns false if not even the header has been parsed. On success,
// validRows is the row count valid across all channels and complete reports
// whether the frame is finished (which also resets reception for the next
// frame). Handles three sender generations: legacy interleaved, v2
// fixed-role, and v3+ arbitrary-role transfers.
bool ImageProtocol::Pimpl::getPartiallyReceivedImageSet(ImageSet& imageSet, int& validRows, bool& complete) {
    imageSet.setWidth(0);
    imageSet.setHeight(0);

    complete = false;

    if(!receiveHeaderParsed) {
        // We haven't even received the image header yet
        return false;
    } else {
        // We received at least some pixel data
        imageSet.setNumberOfImages(receiveHeader.numberOfImages);
        bool flaggedDisparityPair = (receiveHeader.isRawImagePair_OBSOLETE == 0); // only meaningful in headers <=V2
        bool isInterleaved = (receiveHeader.flags & HeaderData::FlagBits::NEW_STYLE_TRANSFER) == 0;
        bool arbitraryChannels = (receiveHeader.flags & HeaderData::FlagBits::HEADER_V3) > 0;
        bool hasExposureTime = (receiveHeader.flags & HeaderData::FlagBits::HEADER_V4) > 0;

        // Forward compatibility check: mask out all known flag bits and see what remains
        unsigned short unaccountedFlags = receiveHeader.flags & ~(HeaderData::FlagBits::NEW_STYLE_TRANSFER
            | HeaderData::FlagBits::HEADER_V3 | HeaderData::FlagBits::HEADER_V4);
        if (unaccountedFlags != 0) {
            // Newer protocol (unknown flag present) - we will try to continue
            // since connection has not been refused earlier
            static bool warnedOnceForward = false;
            if (!warnedOnceForward) {
                LOG_WARN("Warning: forward-compatible mode; will attempt to process image stream with unknown extra flags. Consider upgrading the client software.");
                warnedOnceForward = true;
            }
        }

        imageSet.setWidth(receiveHeader.width);
        imageSet.setHeight(receiveHeader.height);
        imageSet.setPixelFormat(0, static_cast<ImageSet::ImageFormat>(receiveHeader.format0));
        if (imageSet.getNumberOfImages() > 1) imageSet.setPixelFormat(1, static_cast<ImageSet::ImageFormat>(receiveHeader.format1));
        if (imageSet.getNumberOfImages() > 2) imageSet.setPixelFormat(2, static_cast<ImageSet::ImageFormat>(receiveHeader.format2));

        int rowStrideArr[ImageSet::MAX_SUPPORTED_IMAGES] = {0};
        int validRowsArr[ImageSet::MAX_SUPPORTED_IMAGES] = {0};
        unsigned char* pixelArr[ImageSet::MAX_SUPPORTED_IMAGES] = {nullptr};

        if (isInterleaved) {
            // OLD transfer (forced to interleaved 2 images mode)
            static bool warnedOnceBackward = false;
            if (!warnedOnceBackward) {
                LOG_WARN("Info: backward-compatible mode; the device is sending with a legacy protocol. Consider upgrading its firmware.");
                warnedOnceBackward = true;
            }
            unsigned char* data = dataProt.getBlockReceiveBuffer(0);
            int validBytes = dataProt.getBlockValidSize(0);
            for (int i=0; i < 2; ++i) {
                pixelArr[i] = decodeInterleaved(i, imageSet.getNumberOfImages(), validBytes, data, validRowsArr[i], rowStrideArr[i]);
            }
            // Legacy sender with mode-dependent channel selection
            imageSet.setIndexOf(ImageSet::ImageType::IMAGE_LEFT, 0);
            imageSet.setIndexOf(ImageSet::ImageType::IMAGE_RIGHT, flaggedDisparityPair ? -1 : 1);
            imageSet.setIndexOf(ImageSet::ImageType::IMAGE_DISPARITY, flaggedDisparityPair ? 1 : -1);
        } else {
            // NEW transfer
            try {
                for (int i=0; i<receiveHeader.numberOfImages; ++i) {
                    unsigned char* data = dataProt.getBlockReceiveBuffer(i);
                    int validBytes = dataProt.getBlockValidSize(i);
                    pixelArr[i] = decodeNoninterleaved(i, imageSet.getNumberOfImages(), validBytes, data, validRowsArr[i], rowStrideArr[i]);
                }
            } catch(const ProtocolException& ex) {
                // Bug fix: the previous code wrote "Protocol exception: " +
                // ex.what(), which is an invalid pointer+pointer addition and
                // fails to compile whenever LOG_WARN is enabled (it was only
                // hidden by the macro expanding to nothing). Build a
                // std::string instead.
                LOG_WARN(std::string("Protocol exception: ") + ex.what());
                resetReception();
                return false;
            }
            if (arbitraryChannels) {
                // Completely customizable channel selection
                imageSet.setIndexOf(ImageSet::ImageType::IMAGE_LEFT, -1);
                imageSet.setIndexOf(ImageSet::ImageType::IMAGE_RIGHT, -1);
                imageSet.setIndexOf(ImageSet::ImageType::IMAGE_DISPARITY, -1);
                for (int i=0; i<imageSet.getNumberOfImages(); ++i) {
                    int typ = receiveHeader.imageTypes[i];
                    ImageSet::ImageType imgtype = static_cast<ImageSet::ImageType>(typ);
                    imageSet.setIndexOf(imgtype, i);
                }
            } else {
                static bool warnedOnceV2 = false;
                if (!warnedOnceV2) {
                    LOG_WARN("Info: received a transfer with header v2");
                    warnedOnceV2 = true;
                }
                // Older v2 header; accessing imageTypes is not valid
                // Two-image sender with mode-dependent channel selection
                imageSet.setIndexOf(ImageSet::ImageType::IMAGE_LEFT, 0);
                imageSet.setIndexOf(ImageSet::ImageType::IMAGE_RIGHT, flaggedDisparityPair ? -1 : 1);
                imageSet.setIndexOf(ImageSet::ImageType::IMAGE_DISPARITY, flaggedDisparityPair ? 1 : -1);
            }
            if(hasExposureTime) {
                imageSet.setExposureTime(receiveHeader.exposureTime);
                imageSet.setLastSyncPulse(receiveHeader.lastSyncPulseSec, receiveHeader.lastSyncPulseMicrosec);
            }
        }

        for (int i=0; i<receiveHeader.numberOfImages; ++i) {
            imageSet.setRowStride(i, rowStrideArr[i]);
            imageSet.setPixelData(i, pixelArr[i]);
        }
        imageSet.setQMatrix(receiveHeader.q);

        imageSet.setSequenceNumber(receiveHeader.seqNum);
        imageSet.setTimestamp(receiveHeader.timeSec, receiveHeader.timeMicrosec);
        imageSet.setDisparityRange(receiveHeader.minDisparity, receiveHeader.maxDisparity);
        imageSet.setSubpixelFactor(receiveHeader.subpixelFactor);

        // The set is only as complete as its least complete channel.
        validRows = validRowsArr[0];
        for (int i=0; i<receiveHeader.numberOfImages; ++i) {
            if (validRowsArr[i] < validRows) {
                validRows = validRowsArr[i];
            }
        }
        if(validRows == receiveHeader.height || receptionDone) {
            complete = true;
            resetReception();
        }

        return true;
    }
}
unsigned char* ImageProtocol::Pimpl::decodeNoninterleaved(int imageNumber, int numImages, int receivedBytes,
unsigned char* data, int& validRows, int& rowStride) {
ImageSet::ImageFormat format;
int bits = 8;
switch (imageNumber) {
case 0: {
format = static_cast<ImageSet::ImageFormat>(receiveHeader.format0);
break;
}
case 1: {
format = static_cast<ImageSet::ImageFormat>(receiveHeader.format1);
break;
}
case 2: {
format = static_cast<ImageSet::ImageFormat>(receiveHeader.format2);
break;
}
default:
throw ProtocolException("Not implemented: decodeNoninterleaved with image index > 2");
}
bits = getFormatBits(static_cast<ImageSet::ImageFormat>(format), false);
int totalBits = bits;
unsigned char* ret = nullptr;
if(receiveHeader.lastTileWidth == 0) {
int bufferOffset0 = 0;
int bufferRowStride = receiveHeader.width*(totalBits) / 8;
if(format == ImageSet::FORMAT_8_BIT_MONO || format == ImageSet::FORMAT_8_BIT_RGB) {
// No decoding is necessary. We can just pass through the
// data pointer
ret = &data[bufferOffset0];
rowStride = bufferRowStride;
validRows = receivedBytes / bufferRowStride;
} else {
// Perform 12-bit => 16 bit decoding
allocateDecodeBuffer(imageNumber);
validRows = receivedBytes / bufferRowStride;
rowStride = 2*receiveHeader.width;
int lastRow = lastReceivedPayloadBytes[imageNumber] / bufferRowStride;
BitConversions::decode12BitPacked(lastRow, validRows, &data[bufferOffset0],
&decodeBuffer[imageNumber][0], bufferRowStride, rowStride, receiveHeader.width);
ret = &decodeBuffer[imageNumber][0];
}
} else {
// Decode the tiled transfer
decodeTiledImage(imageNumber,
lastReceivedPayloadBytes[imageNumber], receivedBytes, data,
receiveHeader.firstTileWidth * (totalBits) / 8,
receiveHeader.middleTilesWidth * (totalBits) / 8,
receiveHeader.lastTileWidth * (totalBits) / 8,
validRows, format, false);
ret = &decodeBuffer[imageNumber][0];
rowStride = receiveHeader.width*getFormatBits(
static_cast<ImageSet::ImageFormat>(format), true)/8;
}
lastReceivedPayloadBytes[imageNumber] = receivedBytes;
return ret;
}
// Decodes one channel of a legacy interleaved transfer, in which all
// channels share a single buffer with their rows stored side by side.
// Returns a pointer to the channel's pixel data (pass-through for 8-bit
// formats, decode buffer for 12-bit) and reports the valid row count and
// row stride. Throws ProtocolException for channel indices above 2.
unsigned char* ImageProtocol::Pimpl::decodeInterleaved(int imageNumber, int numImages, int receivedBytes,
        unsigned char* data, int& validRows, int& rowStride) {
    ImageSet::ImageFormat format = static_cast<ImageSet::ImageFormat>(
        imageNumber == 0 ? receiveHeader.format0 : receiveHeader.format1);

    // A combined row contains one row of every channel; compute the total
    // bits per combined row.
    int bits0 = getFormatBits(static_cast<ImageSet::ImageFormat>(receiveHeader.format0), false);
    int bits1 = getFormatBits(static_cast<ImageSet::ImageFormat>(receiveHeader.format1), false);
    int bits2 = getFormatBits(static_cast<ImageSet::ImageFormat>(receiveHeader.format2), false);
    int totalBits = (numImages<3)?(bits0 + bits1):(bits0 + bits1 + bits2);

    unsigned char* ret = nullptr;
    if(receiveHeader.lastTileWidth == 0) {
        // Untiled transfer: this channel starts after the preceding
        // channels' data within each combined row.
        int bufferOffset;
        switch (imageNumber) {
            case 0: { bufferOffset = 0; break; }
            case 1: { bufferOffset = receiveHeader.width * bits0/8; break; }
            case 2: { bufferOffset = receiveHeader.width * (bits0 + bits1)/8; break; }
            default:
                throw ProtocolException("Not implemented: image index > 2");
        }
        int bufferRowStride = receiveHeader.width*(totalBits) / 8;

        if(format == ImageSet::FORMAT_8_BIT_MONO || format == ImageSet::FORMAT_8_BIT_RGB) {
            // No decoding is necessary. We can just pass through the
            // data pointer
            ret = &data[bufferOffset];
            rowStride = bufferRowStride;
            validRows = receivedBytes / bufferRowStride;
        } else {
            // Perform 12-bit => 16 bit decoding
            allocateDecodeBuffer(imageNumber);
            validRows = std::min(receivedBytes / bufferRowStride, (int)receiveHeader.height);
            rowStride = 2*receiveHeader.width;
            // Only decode rows that arrived since the previous call.
            int lastRow = lastReceivedPayloadBytes[imageNumber] / bufferRowStride;

            BitConversions::decode12BitPacked(lastRow, validRows, &data[bufferOffset],
                &decodeBuffer[imageNumber][0], bufferRowStride, rowStride, receiveHeader.width);
            ret = &decodeBuffer[imageNumber][0];
        }
    } else {
        // Decode the tiled transfer
        decodeTiledImage(imageNumber,
            lastReceivedPayloadBytes[imageNumber], receivedBytes, data,
            receiveHeader.firstTileWidth * (totalBits) / 8,
            receiveHeader.middleTilesWidth * (totalBits) / 8,
            receiveHeader.lastTileWidth * (totalBits) / 8,
            validRows, format, true);
        ret = &decodeBuffer[imageNumber][0];
        rowStride = receiveHeader.width*getFormatBits(
            static_cast<ImageSet::ImageFormat>(format), true)/8;
    }

    lastReceivedPayloadBytes[imageNumber] = receivedBytes;
    return ret;
}
// Ensures the decode buffer for the given channel is sized for one fully
// decoded (unpacked) frame of that channel's format. Throws
// ProtocolException for channel indices above 2.
void ImageProtocol::Pimpl::allocateDecodeBuffer(int imageNumber) {
    // Map the channel index to its pixel format from the received header.
    ImageSet::ImageFormat format;
    if (imageNumber == 0) {
        format = static_cast<ImageSet::ImageFormat>(receiveHeader.format0);
    } else if (imageNumber == 1) {
        format = static_cast<ImageSet::ImageFormat>(receiveHeader.format1);
    } else if (imageNumber == 2) {
        format = static_cast<ImageSet::ImageFormat>(receiveHeader.format2);
    } else {
        throw ProtocolException("Not implemented: allocateDecodeBuffer with image index > 2");
    }

    // Size for the decoded representation (e.g. 12-bit packed -> 16 bit).
    const int bitsPerPixel = getFormatBits(format, true);
    const int requiredSize = receiveHeader.width * receiveHeader.height * bitsPerPixel / 8;

    if(decodeBuffer[imageNumber].size() != static_cast<unsigned int>(requiredSize)) {
        decodeBuffer[imageNumber].resize(requiredSize);
    }
}
// Reassembles one channel of a tiled transfer into the channel's decode
// buffer. Tiles are vertical stripes sent one after another; for each tile,
// only the rows received since the previous call (between
// lastReceivedPayloadBytes and receivedPayloadBytes) are decoded. validRows
// is set from the last tile, which is transmitted last and therefore is the
// least complete.
void ImageProtocol::Pimpl::decodeTiledImage(int imageNumber, int lastReceivedPayloadBytes, int receivedPayloadBytes,
        const unsigned char* data, int firstTileStride, int middleTilesStride, int lastTileStride, int& validRows,
        ImageSet::ImageFormat format, bool dataIsInterleaved) {

    // Allocate a decoding buffer
    allocateDecodeBuffer(imageNumber);

    // Get beginning and end of first tile
    int numTiles = getNumTiles(receiveHeader.width, receiveHeader.firstTileWidth,
        receiveHeader.middleTilesWidth, receiveHeader.lastTileWidth);
    int payloadOffset = 0;
    int decodeXOffset = 0;
    int prevTileStrides = 0;
    for(int i = 0; i < numTiles; i++) {
        // Get relevant parameters
        int tileWidth = 0;
        int tileStride = 0;

        if(i == 0) {
            tileStride = firstTileStride;
            tileWidth = receiveHeader.firstTileWidth;
        } else if(i == numTiles-1) {
            tileStride = lastTileStride;
            tileWidth = receiveHeader.lastTileWidth;
        } else {
            tileStride = middleTilesStride;
            tileWidth = receiveHeader.middleTilesWidth;
        }

        // Row range of this tile that is newly available in this call.
        int tileStart = std::max(0, (lastReceivedPayloadBytes - payloadOffset) / tileStride);
        int tileStop = std::min(std::max(0, (receivedPayloadBytes - payloadOffset) / tileStride), (int)receiveHeader.height);
        // Byte offset of this channel within one (possibly interleaved)
        // tile row.
        int tileOffset;
        if (dataIsInterleaved) {
            switch (imageNumber) {
                case 0: { tileOffset = 0; break; }
                case 1: { tileOffset = tileWidth * (
                        getFormatBits(static_cast<ImageSet::ImageFormat>(receiveHeader.format0), false)
                    )/8; break; }
                case 2: { tileOffset = tileWidth * (
                        getFormatBits(static_cast<ImageSet::ImageFormat>(receiveHeader.format0), false)
                        + getFormatBits(static_cast<ImageSet::ImageFormat>(receiveHeader.format1), false)
                    )/8; break; }
                default:
                    throw ProtocolException("Not implemented: image index > 2");
            }
        } else {
            tileOffset = 0;
        }
        if(i > 0) {
            // Skip the full payload of all preceding tiles.
            tileOffset += receiveHeader.height * prevTileStrides;
        }

        // Decode
        int bytesPixel;
        if(format == ImageSet::FORMAT_12_BIT_MONO) {
            bytesPixel = 2;
            BitConversions::decode12BitPacked(tileStart, tileStop, &data[tileOffset],
                &decodeBuffer[imageNumber][decodeXOffset], tileStride, 2*receiveHeader.width, tileWidth);
        } else {
            bytesPixel = (format == ImageSet::FORMAT_8_BIT_RGB ? 3 : 1);
            decodeRowsFromTile(tileStart, tileStop, &data[tileOffset],
                &decodeBuffer[imageNumber][decodeXOffset], tileStride,
                receiveHeader.width*bytesPixel, tileWidth*bytesPixel);
        }

        payloadOffset += receiveHeader.height * tileStride;
        decodeXOffset += tileWidth * bytesPixel;
        prevTileStrides += tileStride;
        if(i == numTiles-1) {
            validRows = tileStop;
        }
    }
}
// Copies the rows [startRow, stopRow) of one tile from the receive buffer
// into the full-width destination image; src/dst strides may differ.
void ImageProtocol::Pimpl::decodeRowsFromTile(int startRow, int stopRow, unsigned const char* src,
        unsigned char* dst, int srcStride, int dstStride, int tileWidth) {
    const unsigned char* srcRow = &src[startRow * srcStride];
    unsigned char* dstRow = &dst[startRow * dstStride];
    for(int row = startRow; row < stopRow; ++row) {
        memcpy(dstRow, srcRow, tileWidth);
        srcRow += srcStride;
        dstRow += dstStride;
    }
}
// Discards all reception state so that the next incoming frame starts from
// a clean slate.
void ImageProtocol::Pimpl::resetReception() {
    receiveHeaderParsed = false;
    receptionDone = false;
    std::fill(lastReceivedPayloadBytes,
        lastReceivedPayloadBytes + ImageSet::MAX_SUPPORTED_IMAGES, 0);
    dataProt.resetReception(false);
}
// Returns whether the underlying data protocol considers the peer connected.
bool ImageProtocol::Pimpl::isConnected() const {
    return dataProt.isConnected();
}

// Returns the next pending control message (e.g. acknowledgements), or
// nullptr if none is pending.
const unsigned char* ImageProtocol::Pimpl::getNextControlMessage(int& length) {
    return dataProt.getNextControlMessage(length);
}

// Returns true once when a new client has connected (server mode).
bool ImageProtocol::Pimpl::newClientConnected() {
    return dataProt.newClientConnected();
}

// Number of frames dropped by the underlying protocol so far.
int ImageProtocol::Pimpl::getNumDroppedFrames() const {
    return dataProt.getDroppedReceptions();
}

std::string ImageProtocol::statusReport() {
    return pimpl->statusReport();
}

// Human-readable diagnostics from the underlying data protocol.
std::string ImageProtocol::Pimpl::statusReport() {
    return dataProt.statusReport();
}
} // namespace

View file

@ -0,0 +1,259 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_IMAGEPROTOCOL_H
#define VISIONTRANSFER_IMAGEPROTOCOL_H
#include "visiontransfer/common.h"
#include "visiontransfer/imageset.h"
#include <vector>
namespace visiontransfer {
/**
* \brief A lightweight protocol for transferring image sets.
*
* Two images are transferred together as a set. These are usually the left
* and right image of a stereo camera, or the left image and a disparity map.
*
* The images are 8- or 12-bit monochrome, or 8-bit RGB color. For simplicity,
* 12-bit images are inflated to 16-bit by introducing additional padding
* bits. Both images must always have the same image size.
*
* When receiving, the class takes in an image set and chops it down to several
* network messages. When sending, the class takes a set of messages and
* assembles them into an image set. We have to differentiate between TCP and
* UDP in both cases.
*/
class VT_EXPORT ImageProtocol {
public:
/// Supported network protocols
enum ProtocolType {
/// The connection oriented TCP transport protocol
PROTOCOL_TCP,
/// The connection-less UDP transport protocol
PROTOCOL_UDP
};
/**
* \brief Creates a new instance for decoding / encoding network messages
* for the given network protocol.
*
* \param server If set to true, this object will be a communication server.
* \param protType The network transport protocol that shall be used (TCP or UDP).
* \param maxUdpPacketSize Maximum allowed size of a UDP packet when sending data.
*/
ImageProtocol(bool server, ProtocolType protType, int maxUdpPacketSize = 1472);
~ImageProtocol();
/**
* \brief Sets a new image set that will be transferred.
*
* \param imageSet The image set that shall be transmitted.
*
* After setting the transfer image, subsequent calls to
* getTransferMessage() can be made for obtaining the encoded
* network messages.
*/
void setTransferImageSet(const ImageSet& imageSet);
/**
* \brief Sets the already pre-formatted image data for the next transfer.
*
* \param metaData ImageSet object containing all the meta data but no
* pixel data.
* \param imageData Pre-formatted data for this transfer.
* \param firstTileWidth If not 0, specifies the width of the first tile in
* a tiled transfer.
* \param middleTilesWidth If not 0, specifies the width of the tiles between
* the first and the last tile in a tiled transfer.
* \param lastTileWidth If not 0, specifies the width of the last tile in
* a tiled transfer.
* \param validBytes The number of bytes that are valid in \c imageData.
*
* This method is a more efficient alternative to setTransferImageSet(). In this case
* the image data already has to be pre-formatted in the format of
* the image protocol, which means row-wise interleaving both images.
* For 12-bit images, the pixel data must be packed in LSB order.
*
* The pixel data contained in \c metaData is ignored, while all
* other data is transmitted. The actual pixel data must be encoded in
* \c imageData.
*
* Parts of \c imageData will be overwritten. There must be at least 4 additional
* allocated bytes after the pixel data in \c imageData.
*
* If \c validBytes is set to a value smaller than the total transfer
* size, only a partial transfer is performed. Subsequent calls to
* setRawValidBytes() are then necessary.
*/
void setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& imageData,
int firstTileWidth = 0, int middleTilesWidth = 0, int lastTileWidth = 0, int validBytes = 0x7FFFFFFF);
/**
* \brief Updates the number of valid bytes in a partial raw transfer.
*
* \param validBytes The number of already valid bytes in the previously
* set raw data pointer.
*
* This method has to be called whenever new data is available in a raw
* transfer. \see setRawTransferData()
*/
void setRawValidBytes(const std::vector<int>& validBytes);
/**
* \brief Gets the next network message for the current transfer.
*
* \param length The length of the network message.
* \return Pointer to the network message data.
*
* If the transfer has already been completed, a null pointer is returned.
*/
const unsigned char* getTransferMessage(int& length);
/**
* \brief Returns true if the current transfer has been completed.
*/
bool transferComplete();
/**
* \brief Aborts the transmission of the current transfer and performs a
* reset of the internal state.
*/
void resetTransfer();
/**
* \brief Returns a received image when complete.
*
* \param imageSet Will be set to the received image set.
* \return Returns true if a new image set has been received. Otherwise
* false.
*
* After obtaining a received image set, reception is reset and
* subsequent calls to getReceivedImageSet() or imagesReceived()
* will return false.
*
* Please note that the received image data is only valid until processing
* the first network message of the next image transfer.
*/
bool getReceivedImageSet(ImageSet& imageSet);
/**
* \brief Returns a partially received image.
*
* \param imageSet Will be set to the partially received image set.
* \param validRows The number of valid image rows in the partially received
* image set.
* \param complete True if the image set has been fully received.
* \return Returns true if a full or partial image have been received.
* Otherwise false.
*
* If a partial image has been received, the meta data returned in
* \c imageSet will be complete. The pixel data contained in
* \c imageSet, however, will only be valid for the first
* \c validRows rows.
*
* After obtaining a complete image set, reception is reset and
* subsequent calls to getPartiallyReceivedImageSet() or imagesReceived()
* will return false.
*
* Please note that the received image data is only valid until processing
* the first network message of the next image transfer.
*/
bool getPartiallyReceivedImageSet(ImageSet& imageSet, int& validRows, bool& complete);
/**
* \brief Returns true if the images of the current transfer have been received.
*/
bool imagesReceived() const;
/**
* \brief Returns the buffer for receiving the next network message.
*
* \param maxLength Maximum allowed length for the next network message
* \return Pointer to the buffer memory.
**/
unsigned char* getNextReceiveBuffer(int& maxLength);
/**
* \brief Handles a received network message
*
* \param length Length of the received network message.
*
* This method has to be called for every network message that has been
* received. The message data must be located in the most recent buffer
* that has been obtained with getNextReceiveBuffer().
*
* After calling this method, please check if a new image has been received
* by calling getReceivedImageSet() or getPartiallyReceivedImageSet().
*
* In order to handle connection timeouts this method should be called
* frequently. If no new data is available, a length of 0 can be passed.
*/
void processReceivedMessage(int length);
/**
* \brief Returns the number of frames that have been dropped since
* connecting to the current remote host.
*
* Dropped frames are caused by dropped packets due to a poor network
* connection
*/
int getNumDroppedFrames() const;
/**
* \brief Aborts the reception of the current image transfer and resets
* the internal state.
*/
void resetReception();
/**
* \brief Returns true if the last message has established a new connection
* from a client
*/
bool newClientConnected();
/**
* \brief Returns true if a remote connection is established
*/
bool isConnected() const;
/**
* \brief If a control message is pending to be transmitted then
* the message data will be returned by this method.
*
* \param length Will be set to the length of the message
* \return Pointer to the message data or NULL if no message is pending
*
* Control messages are only needed when using the UDP network protocol.
*/
const unsigned char* getNextControlMessage(int& length);
std::string statusReport();
private:
// We follow the pimpl idiom
class Pimpl;
Pimpl* pimpl;
// This class cannot be copied
ImageProtocol(const ImageProtocol& other);
ImageProtocol& operator=(const ImageProtocol&);
};
} // namespace
#endif

View file

@ -0,0 +1,52 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_IMAGESET_OPENCV_H
#define VISIONTRANSFER_IMAGESET_OPENCV_H
#ifdef CV_MAJOR_VERSION
namespace visiontransfer {
/*
* OpenCV-specific implementations that need to be inlined in order to avoid
* dependencies for projects that do not make use of OpenCV
*/
// Wraps one image of the set in a cv::Mat header. The cv::Mat constructors
// used here take a pointer to the existing pixel buffer plus the row stride,
// so no pixel data is copied; the ImageSet must outlive the matrix.
inline void ImageSet::toOpenCVImage(int imageNumber, cv::Mat& dest, bool convertRgbToBgr) {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
switch(formats[imageNumber]) {
case ImageSet::FORMAT_12_BIT_MONO:
// 12-bit data is stored in 16-bit containers
dest= cv::Mat_<unsigned short>(height, width,
reinterpret_cast<unsigned short*>(data[imageNumber]), rowStride[imageNumber]);
break;
case ImageSet::FORMAT_8_BIT_MONO:
dest = cv::Mat_<unsigned char>(height, width,
data[imageNumber], rowStride[imageNumber]);
break;
case ImageSet::FORMAT_8_BIT_RGB:
dest = cv::Mat_<cv::Vec3b>(height, width,
reinterpret_cast<cv::Vec3b*>(data[imageNumber]), rowStride[imageNumber]);
if(convertRgbToBgr) {
// Match OpenCV's BGR channel-order convention on request
cv::cvtColor(dest, dest, cv::COLOR_RGB2BGR);
}
break;
}
}
} // namespace
#endif
#endif

View file

@ -0,0 +1,254 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <iostream>
#include <fstream>
#include <stdexcept>
#include <cstring>
#include "visiontransfer/imageset.h"
#ifdef _WIN32
#include <winsock2.h>
#else
#include <arpa/inet.h>
#endif
using namespace visiontransfer;
namespace visiontransfer {
// Constructs an empty image set: zero size, no pixel data, no Q matrix and
// no reference counting yet. The defaults reproduce the backwards-compatible
// two-image layout: left image at index 0, right image at index 1,
// disparity disabled (-1), with a subpixel factor of 16.
ImageSet::ImageSet()
: width(0), height(0), qMatrix(NULL), timeSec(0), timeMicrosec(0),
seqNum(0), minDisparity(0), maxDisparity(0), subpixelFactor(16),
referenceCounter(NULL), numberOfImages(2), indexLeftImage(0), indexRightImage(1), indexDisparityImage(-1),
exposureTime(0), lastSyncPulseSec(0), lastSyncPulseMicrosec(0) {
for (int i=0; i<MAX_SUPPORTED_IMAGES; ++i) {
formats[i] = FORMAT_8_BIT_MONO;
data[i] = NULL;
rowStride[i] = 0;
}
}
// Shallow copy: shares the pixel buffers of 'other' and increments the
// shared reference counter (countRef == true).
ImageSet::ImageSet(const ImageSet& other) {
copyData(*this, other, true);
}
// Shallow assignment: releases this object's current reference, then shares
// the buffers of 'other' and increments the shared reference counter.
ImageSet& ImageSet::operator= (ImageSet const& other) {
    // Guard against self-assignment before releasing our own reference.
    if(&other == this) {
        return *this;
    }
    decrementReference();
    copyData(*this, other, true);
    return *this;
}
// Releases this object's reference to the shared buffers; the buffers are
// freed once the last referencing ImageSet is destroyed.
ImageSet::~ImageSet() {
decrementReference();
}
// Copies all meta data fields and the (shared) pixel data pointers from src
// to dest. No pixel data is duplicated; afterwards both objects refer to the
// same buffers. If countRef is true and reference counting is active, the
// shared reference counter is incremented to account for the new alias.
void ImageSet::copyData(ImageSet& dest, const ImageSet& src, bool countRef) {
    dest.width = src.width;
    dest.height = src.height;
    // Must be set before the per-image loop below uses it as a bound.
    // (The original code redundantly assigned numberOfImages a second time
    // further down; the duplicate has been removed.)
    dest.numberOfImages = src.numberOfImages;
    for(int i=0; i<src.numberOfImages; i++) {
        dest.rowStride[i] = src.rowStride[i];
        dest.formats[i] = src.formats[i];
        dest.data[i] = src.data[i];
    }
    dest.qMatrix = src.qMatrix;
    dest.timeSec = src.timeSec;
    dest.timeMicrosec = src.timeMicrosec;
    dest.seqNum = src.seqNum;
    dest.minDisparity = src.minDisparity;
    dest.maxDisparity = src.maxDisparity;
    dest.subpixelFactor = src.subpixelFactor;
    dest.referenceCounter = src.referenceCounter;
    dest.indexLeftImage = src.indexLeftImage;
    dest.indexRightImage = src.indexRightImage;
    dest.indexDisparityImage = src.indexDisparityImage;
    dest.exposureTime = src.exposureTime;
    dest.lastSyncPulseSec = src.lastSyncPulseSec;
    dest.lastSyncPulseMicrosec = src.lastSyncPulseMicrosec;
    if(dest.referenceCounter != nullptr && countRef) {
        (*dest.referenceCounter)++;
    }
}
// Drops this object's reference to the shared data. If reference counting
// is active (referenceCounter != nullptr, i.e. the data was allocated by
// copyTo()) and this was the last reference, the pixel buffers, the Q
// matrix and the counter itself are freed.
void ImageSet::decrementReference() {
if(referenceCounter != nullptr && --(*referenceCounter) == 0) {
for (int i=0; i<getNumberOfImages(); ++i) {
delete []data[i];
data[i] = nullptr;
}
delete []qMatrix;
delete referenceCounter;
qMatrix = nullptr;
referenceCounter = nullptr;
}
}
// Writes the image with the given index to a binary PGM (greyscale, "P5")
// or PPM (RGB, "P6") file. 12-bit data is written as 16-bit big-endian
// samples, as the Netpbm format requires the most significant byte first.
//
// Throws std::runtime_error for an invalid image number, an unsupported
// pixel format, or if the output file cannot be opened.
void ImageSet::writePgmFile(int imageNumber, const char* fileName) const {
    if(imageNumber < 0 || imageNumber >= getNumberOfImages()) {
        throw std::runtime_error("Illegal image number!");
    }
    std::fstream strm(fileName, std::ios::out | std::ios::binary);
    if(!strm.is_open()) {
        // Fail loudly instead of silently writing nothing.
        throw std::runtime_error("Unable to open output file!");
    }
    // Write PGM / PBM header
    int type, maxVal, bytesPerChannel, channels;
    switch(formats[imageNumber]) {
        case FORMAT_8_BIT_MONO:
            type = 5;
            maxVal = 255;
            bytesPerChannel = 1;
            channels = 1;
            break;
        case FORMAT_12_BIT_MONO:
            type = 5;
            maxVal = 4095;
            bytesPerChannel = 2;
            channels = 1;
            break;
        case FORMAT_8_BIT_RGB:
            type = 6;
            maxVal = 255;
            bytesPerChannel = 1;
            channels = 3;
            break;
        default:
            throw std::runtime_error("Illegal pixel format!");
    }
    strm << "P" << type << " " << width << " " << height << " " << maxVal << std::endl;
    // Write image data row by row, honoring the row stride
    for(int y = 0; y < height; y++) {
        for(int x = 0; x < width*channels; x++) {
            unsigned char* pixel = &data[imageNumber][y*rowStride[imageNumber] + x*bytesPerChannel];
            if(bytesPerChannel == 2) {
                // Swap endianess: Netpbm stores 16-bit samples MSB first
                unsigned short swapped = htons(*reinterpret_cast<unsigned short*>(pixel));
                strm.write(reinterpret_cast<char*>(&swapped), sizeof(swapped));
            } else {
                strm.write(reinterpret_cast<char*>(pixel), 1);
            }
        }
    }
}
// Returns the number of meaningful bits per pixel for the given pixel
// format (12 for the 12-bit format even though it occupies 16 bits).
int ImageSet::getBitsPerPixel(ImageFormat format) {
    if(format == FORMAT_8_BIT_MONO) {
        return 8;
    }
    if(format == FORMAT_8_BIT_RGB) {
        return 24;
    }
    if(format == FORMAT_12_BIT_MONO) {
        return 12;
    }
    throw std::runtime_error("Invalid image format!");
}
// Makes a deep copy of this image set into dest: fresh Q matrix, freshly
// allocated pixel buffers (with compacted row strides) and a new reference
// counter owned by dest.
void ImageSet::copyTo(ImageSet& dest) {
    dest.decrementReference();
    copyData(dest, *this, false);

    dest.qMatrix = new float[16];
    if(qMatrix != nullptr) {
        memcpy(const_cast<float*>(dest.qMatrix), qMatrix, sizeof(float)*16);
    } else {
        // Default-constructed sets have no Q matrix (qMatrix == NULL);
        // zero-fill instead of copying from a null pointer (undefined behavior).
        memset(const_cast<float*>(dest.qMatrix), 0, sizeof(float)*16);
    }

    for(int i=0; i<getNumberOfImages(); i++) {
        int bytesPixel = getBytesPerPixel(i);

        // The copy uses a compact stride (no padding between rows)
        dest.rowStride[i] = width*bytesPixel;
        dest.data[i] = new unsigned char[height*dest.rowStride[i]];

        // Convert possibly different row strides
        for(int y = 0; y < height; y++) {
            memcpy(&dest.data[i][y*dest.rowStride[i]], &data[i][y*rowStride[i]],
                dest.rowStride[i]);
        }
    }

    dest.referenceCounter = new int;
    (*dest.referenceCounter) = 1;
}
// Returns the storage size of one pixel in bytes for the given format
// (the 12-bit format occupies 2 bytes per pixel).
int ImageSet::getBytesPerPixel(ImageFormat format) {
    if(format == FORMAT_8_BIT_MONO) {
        return 1;
    }
    if(format == FORMAT_8_BIT_RGB) {
        return 3;
    }
    if(format == FORMAT_12_BIT_MONO) {
        return 2;
    }
    throw std::runtime_error("Invalid image format!");
}
// Returns the role (left / right / disparity) assigned to the image at the
// given index, checking the roles in the same order as before.
ImageSet::ImageType ImageSet::getImageType(int imageNumber) const {
    assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
    const ImageType candidates[] = {
        ImageSet::ImageType::IMAGE_LEFT,
        ImageSet::ImageType::IMAGE_RIGHT,
        ImageSet::ImageType::IMAGE_DISPARITY
    };
    for (ImageType candidate: candidates) {
        if (getIndexOf(candidate) == imageNumber) {
            return candidate;
        }
    }
    throw std::runtime_error("Invalid image number for getImageType!");
}
// Legacy mode switch for two-image sets: either left image + right image,
// or left image + disparity map.
void ImageSet::setImageDisparityPair(bool dispPair) {
    if (getNumberOfImages() != 2) throw std::runtime_error("setImageDisparityPair is only supported for two-image sets");
    // Let index assignments directly follow the mode
    indexLeftImage = 0;
    if (dispPair) {
        indexRightImage = -1;
        indexDisparityImage = 1;
    } else {
        indexRightImage = 1;
        indexDisparityImage = -1;
    }
}
// Looks up the image index assigned to the requested role. Returns -1 for
// a role that is currently disabled, or throws if throwIfNotFound is set.
// An ImageType that is not a valid role always throws.
int ImageSet::getIndexOf(ImageType what, bool throwIfNotFound) const {
    int idx;
    if (what == IMAGE_LEFT) {
        idx = indexLeftImage;
    } else if (what == IMAGE_RIGHT) {
        idx = indexRightImage;
    } else if (what == IMAGE_DISPARITY) {
        idx = indexDisparityImage;
    } else {
        throw std::runtime_error("Invalid ImageType for query!");
    }
    if (throwIfNotFound && (idx==-1)) throw std::runtime_error("ImageSet does not contain the queried ImageType");
    return idx;
}
// Assigns the image index for the given ImageType role (-1 disables the
// role). Throws std::runtime_error for an ImageType that is not a valid
// role (e.g. IMAGE_UNDEFINED).
void ImageSet::setIndexOf(ImageType what, int idx) {
    switch(what) {
        case IMAGE_LEFT: {
            indexLeftImage = idx;
            break;
        }
        case IMAGE_RIGHT: {
            indexRightImage = idx;
            break;
        }
        case IMAGE_DISPARITY: {
            indexDisparityImage = idx;
            break;
        }
        default:
            // Removed the stray debug print to std::cout that preceded this
            // throw; library code should not write diagnostics to stdout.
            throw std::runtime_error("Invalid ImageType for setIndexOf!");
    }
}
} // namespace

View file

@ -0,0 +1,548 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_IMAGESET_H
#define VISIONTRANSFER_IMAGESET_H
#include <cassert>
#include <cstddef>
#include "visiontransfer/common.h"
namespace visiontransfer {
/**
* \brief A set of one to three images, but usually two (the left camera image
* and the disparity map). One- and three-image modes can be enabled
* in the device configuration interface.
*
* For backwards compatibility, for sets of at least two images the image at
* index 0 is always the left camera image, while the one at index 1 is either the
* disparity map if in disparity processing mode, or the right image otherwise.
*
* All images must be of equal width and height, but are allowed to have
* different pixel formats. Please note that the class does not manage the
* pixel data but only keeps pointers. You thus need to ensure that the pixel
* data remains valid for as long as this object persists.
*/
class VT_EXPORT ImageSet {
public:
static const int MAX_SUPPORTED_IMAGES = 3;
/**
* \brief Image formats that can be transferred.
*/
enum ImageFormat {
/// 8-bit greyscale format
FORMAT_8_BIT_MONO,
/// 8-bit RGB format
FORMAT_8_BIT_RGB,
/// 12-bit greyscale format plus 4 bits of padding
/// (hence a total of 16 bits).
FORMAT_12_BIT_MONO
};
/**
* \deprecated Please use the new format constants in \c ImageFormat.
*/
enum ImageFormat_Deprecated {
FORMAT_8_BIT = FORMAT_8_BIT_MONO,
FORMAT_12_BIT = FORMAT_12_BIT_MONO
};
/**
* \brief Supported image types
*/
enum ImageType {
IMAGE_UNDEFINED,
IMAGE_LEFT,
IMAGE_DISPARITY,
IMAGE_RIGHT,
};
/**
* \brief Default constructor creating an image set with no pixel data.
*/
ImageSet();
/**
* \brief Copy constructor creating a shallow copy of the image set.
*/
ImageSet(const ImageSet& other);
~ImageSet();
ImageSet& operator= (ImageSet const& other);
/**
* \brief Sets a new width for both images.
*/
void setWidth(int w) {width = w;}
/**
* \brief Sets a new height for both images.
*/
void setHeight(int h) {height = h;}
/**
* \brief Sets a new row stride for the pixel data of one image.
*
* \param imageNumber Number of the image for which to set the
* row stride (0 ... getNumberOfImages()-1).
* \param stride The row stride that shall be set.
*/
void setRowStride(int imageNumber, int stride) {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
rowStride[imageNumber] = stride;
}
/**
* \brief Sets the pixel format for the given image.
*
* \param imageNumber Number of the image for which to set the
* pixel format (0 ... getNumberOfImages()-1).
* \param format The pixel format that shall be set.
*/
void setPixelFormat(int imageNumber, ImageFormat format) {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
formats[imageNumber] = format;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
DEPRECATED("Use setPixelFormat(int, ImageFormat) instead") void setPixelFormat(int imageNumber, ImageFormat_Deprecated format) {
setPixelFormat(imageNumber, static_cast<ImageFormat>(format));
}
#endif
/**
* \brief Sets the pixel data for the given image.
*
* \param imageNumber Number of the image for which to set the
* pixel data (0 ... getNumberOfImages()-1).
* \param pixelData The pixel data that shall be set.
*/
void setPixelData(int imageNumber, unsigned char* pixelData) {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
data[imageNumber] = pixelData;
}
/**
* \brief Sets the pointer to the disparity-to-depth mapping matrix q.
*
* No data is copied. The data which q is pointing to has to remain valid
* for as long as this object exists.
*/
void setQMatrix(const float* q) {
qMatrix = q;
}
/**
* \brief Sets the sequence number for this image set.
*/
void setSequenceNumber(unsigned int num) {
seqNum = num;
}
/**
* \brief Sets the time at which this image set has been captured.
*
* \param seconds The time stamp with a resolution of one second.
* \param microsec The fractional seconds part of the time stamp with
* a resolution of 1 microsecond.
*/
void setTimestamp(int seconds, int microsec) {
timeSec = seconds;
timeMicrosec = microsec;
}
/**
* \brief Sets the value range for the disparity map contained in this
* image set.
*
* \param minimum Minimum disparity value.
* \param maximum Maximum disparity value.
*/
void setDisparityRange(int minimum, int maximum) {
minDisparity = minimum;
maxDisparity = maximum;
}
/**
* \brief Sets the subpixel factor for this image set.
*/
void setSubpixelFactor(int subpixFact) {
subpixelFactor = subpixFact;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Sets whether this is a left camera image and disparity
* map set, or two raw camera images.
*
* DEPRECATION NOTICE: Please use setNumberOfImages() and setIndexOf()
* for more comprehensive control of the images in the image set.
*/
DEPRECATED("Only compatible with two-image sets: use setNumberOfImages() and setIndexOf() instead")
void setImageDisparityPair(bool dispPair);
#endif
/**
* \brief Returns the width of each image.
*/
int getWidth() const {return width;}
/**
* \brief Returns the height of each image.
*/
int getHeight() const {return height;}
/**
* \brief Returns the row stride for the pixel data of one image.
*
* \param imageNumber Number of the image for which to obtain the
* row stride (0 ... getNumberOfImages()-1).
*
* Please use getRowStride(ImageSet::ImageType) to access the
* data by their abstract role in lieu of their index in the set.
*/
int getRowStride(int imageNumber) const {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
return rowStride[imageNumber];
}
/**
* \brief Returns the row stride for the pixel data of one image.
*
* \param what ImageType of the desired channel.
*
* This function will throw an exception when the ImageType
* is not present in this set (use hasImageType(what) to check).
*/
int getRowStride(ImageType what) const {
int idx = getIndexOf(what, true);
return getRowStride(idx);
}
/**
* \brief Returns the pixel format for the given image.
*
* \param imageNumber Number of the image for which to receive the
* pixel format (0 ... getNumberOfImages()-1).
*
* Please use getPixelFormat(ImageSet::ImageType) to access the
* data by their abstract role in lieu of their index in the set.
*/
ImageFormat getPixelFormat(int imageNumber) const {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
return formats[imageNumber];
}
/**
* \brief Returns the pixel format for the given image.
*
* \param what ImageType of the desired channel.
*
* This function will throw an exception when the ImageType
* is not present in this set (use hasImageType(what) to check).
*/
ImageFormat getPixelFormat(ImageType what) const {
int idx = getIndexOf(what, true);
return getPixelFormat(idx);
}
/**
* \brief Returns the pixel data for the given image.
*
* \param imageNumber Number of the image for which to receive the
* pixel data (0 ... getNumberOfImages()-1).
*
* Please use getPixelData(ImageSet::ImageType) to access the
* data by their abstract role in lieu of their index in the set.
*/
unsigned char* getPixelData(int imageNumber) const {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
return data[imageNumber];
}
/**
* \brief Returns the pixel data for the given image.
*
* \param what ImageType of the desired channel.
*
* This function will throw an exception when the ImageType
* is not present in this set (use hasImageType(what) to check).
*/
unsigned char* getPixelData(ImageType what) const {
int idx = getIndexOf(what, true);
return getPixelData(idx);
}
/**
* \brief Returns a pointer to the disparity-to-depth mapping matrix q.
*/
const float* getQMatrix() const {
return qMatrix;
}
/**
* \brief Returns the sequence number for this image set.
*/
unsigned int getSequenceNumber() const {return seqNum;}
/**
* \brief Returns the time at which this image set has been captured.
*
* \param seconds The time stamp with a resolution of one second.
* \param microsec The fractional seconds part of the time stamp with
* a resolution of 1 microsecond.
*/
void getTimestamp(int& seconds, int& microsec) const {
seconds = timeSec;
microsec = timeMicrosec;
}
/**
* \brief Gets the value range for the disparity map contained in this
* image set. If the image set does not contain any disparity data
* then the disparity range is undefined.
*
* \param minimum Minimum disparity value.
* \param maximum Maximum disparity value.
*/
void getDisparityRange(int& minimum, int& maximum) const {
minimum = minDisparity;
maximum = maxDisparity;
}
/**
* \brief Gets the subpixel factor for this image set.
*/
int getSubpixelFactor() const {
return subpixelFactor;
}
/**
* \brief Writes one image of the set to a PGM or PPM file.
*
* \param imageNumber The number of the image that shall be written.
* \param fileName File name of the PGM or PPM file that shall be created.
*/
void writePgmFile(int imageNumber, const char* fileName) const;
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Returns true if this is a left camera image and disparity
* map pair.
*
* DEPRECATION NOTICE: this is a legacy function; in case of image sets
* with one or three images, the result may not be as expected
* (this functions returns true only for the case of exactly two images:
* left image plus disparity image).
*
* The new function hasImageType(ImageSet::ImageType) provides better
* granularity of what data are enabled and present.
*/
DEPRECATED("Only compatible with two-image sets: use hasImageType(ImageSet::IMAGE_DISPARITY) instead")
bool isImageDisparityPair() const {
return (getNumberOfImages()==2) && hasImageType(IMAGE_DISPARITY);
}
#endif
/**
* \brief Makes a deep copy of this image set.
*/
void copyTo(ImageSet& dest);
/**
* \brief Returns the number of bytes that are required to store one
* image pixel.
*
* \param imageNumber The number of the image (0 ... getNumberOfImages()-1).
*/
int getBytesPerPixel(int imageNumber) const {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
return getBytesPerPixel(formats[imageNumber]);
}
/**
* \brief Returns the number of bits that are required to store one
* image pixel.
*
* \param imageNumber The number of the image (0 ... getNumberOfImages()-1).
*/
int getBitsPerPixel(int imageNumber) const {
assert(imageNumber >= 0 && imageNumber < getNumberOfImages());
return getBitsPerPixel(formats[imageNumber]);
}
/**
* \brief Returns the number of bits that are required to store one pixel
* of the image with the given ImageType.
*
* \param what ImageType of the desired channel.
*/
int getBitsPerPixel(ImageType what) const {
int idx = getIndexOf(what, true);
return getBitsPerPixel(idx);
}
/**
* \brief Returns the number of bits that are required to store one
* image pixel with the given pixel format.
*/
static int getBitsPerPixel(ImageFormat format);
/**
* \brief Returns the number of bytes that are required to store one
* image pixel with the given pixel format.
*/
static int getBytesPerPixel(ImageFormat format);
/**
* \brief Returns the number of images in this set
*/
int getNumberOfImages() const {
return numberOfImages;
}
/**
* \brief Sets the number of valid images in this set
*/
void setNumberOfImages(int number) {
assert(number >= 1 && number <= MAX_SUPPORTED_IMAGES);
numberOfImages = number;
}
/**
* \brief Returns the ImageType of the specified channel
*/
ImageType getImageType(int imageNumber) const;
/**
* \brief Returns the index of a specific image type
*
* \param what ImageType of the desired channel.
* \param throwIfNotFound Throw a runtime error instead of returning -1.
*
* \return Returns the index of the specified type, or -1 if not found.
*/
int getIndexOf(ImageType what, bool throwIfNotFound=false) const;
/**
* \brief Returns whether an image of the specified ImageType is included
* in the enabled data
*/
bool hasImageType(ImageType what) const {
return getIndexOf(what) >= 0;
}
/**
* \brief Assign an image index to a specified ImageType, -1 to disable
*
* \param what The ImageType to assign a new image index to.
* \param idx The index of the specified ImageType inside the data of
* this ImageSet (-1 to disable).
*/
void setIndexOf(ImageType what, int idx);
#ifdef CV_MAJOR_VERSION
/**
* \brief Converts one image of the set to an OpenCV image.
*
* \param imageNumber The number of the image that shall be converted
* (0 ... getNumberOfImages()-1).
* \param convertRgbToBgr If true, then color images will converted from
* RGB to BGR in order to comply to OpenCV's convention.
*
* For this method to be available, the OpenCV headers need to be
* included before including headers for libvisiontransfer.
*
* Please note that only a shallow copy is performed. The ImageSet object
* must be kept alive for as long as the OpenCV image is in use.
*/
inline void toOpenCVImage(int imageNumber, cv::Mat& dest, bool convertRgbToBgr = true);
#endif
/**
* \brief Sets the exposure time that was used for capturing the image set
*
* \param timeMicrosec Exposure time measured in microseconds
*/
void setExposureTime(int timeMicrosec) {
exposureTime = timeMicrosec;
}
/**
* \brief Gets the exposure time in microseconds that was used for
* capturing the image set.
*
* \return Exposure time in microseconds
*/
int getExposureTime() const {
return exposureTime;
}
/**
* \brief Sets the timestamp of the last received sync pulse
* \param seconds The time stamp with a resolution of one second.
* \param microsec The fractional seconds part of the time stamp with
* a resolution of 1 microsecond.
*/
void setLastSyncPulse(int seconds, int microsec) {
lastSyncPulseSec = seconds;
lastSyncPulseMicrosec = microsec;
}
/**
* \brief Gets the timestamp of the last received sync pulse
*
* \param seconds The time stamp with a resolution of one second.
* \param microsec The fractional seconds part of the time stamp with
* a resolution of 1 microsecond.
*/
void getLastSyncPulse(int& seconds, int& microsec) const {
seconds = lastSyncPulseSec;
microsec = lastSyncPulseMicrosec;
}
private:
// No pimpl idiom here as almost everything is inlined.
int width;
int height;
int rowStride[MAX_SUPPORTED_IMAGES];
ImageFormat formats[MAX_SUPPORTED_IMAGES];
unsigned char* data[MAX_SUPPORTED_IMAGES];
const float* qMatrix;
int timeSec;
int timeMicrosec;
unsigned int seqNum;
int minDisparity;
int maxDisparity;
int subpixelFactor;
// Shared counter for the shallow-copy reference counting; nullptr when
// this set does not own its buffers.
int* referenceCounter;
int numberOfImages;
// Role-to-index mapping; -1 disables the respective role.
int indexLeftImage;
int indexRightImage;
int indexDisparityImage;
int exposureTime;
int lastSyncPulseSec;
int lastSyncPulseMicrosec;
void copyData(ImageSet& dest, const ImageSet& src, bool countRef);
void decrementReference();
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// For source compatibility
// Deprecated alias kept so that legacy code using ImagePair still compiles.
class DEPRECATED("Use ImageSet instead.") ImagePair: public ImageSet {
};
#endif
} // namespace
#include "visiontransfer/imageset-opencv.h"
#endif

View file

@ -0,0 +1,642 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <cstdio>
#include <iostream>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <mutex>
#include "visiontransfer/imagetransfer.h"
#include "visiontransfer/exceptions.h"
#include "visiontransfer/datablockprotocol.h"
#include "visiontransfer/networking.h"
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
/*************** Pimpl class containing all private members ***********/
// Private implementation of ImageTransfer. Holds the sockets, the protocol
// codec and the two mutexes that make concurrent send/receive safe.
class ImageTransfer::Pimpl {
public:
    Pimpl(const char* address, const char* service, ImageProtocol::ProtocolType protType,
        bool server, int bufferSize, int maxUdpPacketSize);
    ~Pimpl();

    // Redeclaration of public members
    void setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& rawData,
        int firstTileWidth = 0, int secondTileWidth = 0, int validBytes = 0x7FFFFFFF);
    void setRawValidBytes(const std::vector<int>& validBytes);
    void setTransferImageSet(const ImageSet& imageSet);
    TransferStatus transferData();
    bool receiveImageSet(ImageSet& imageSet);
    bool receivePartialImageSet(ImageSet& imageSet, int& validRows, bool& complete);
    int getNumDroppedFrames() const;
    bool isConnected() const;
    void disconnect();
    std::string getRemoteAddress() const;
    bool tryAccept();
    std::string statusReport();

private:
    // Configuration parameters (fixed at construction)
    ImageProtocol::ProtocolType protType;
    bool isServer;
    int bufferSize;
    int maxUdpPacketSize;

    // Thread synchronization: one mutex per direction, so a dedicated
    // sender thread and a dedicated receiver thread do not contend
    std::recursive_mutex receiveMutex;
    std::recursive_mutex sendMutex;

    // Transfer related members. tcpServerSocket is only used in TCP
    // server mode; clientSocket carries the actual data in all modes.
    SOCKET clientSocket;
    SOCKET tcpServerSocket;
    sockaddr_in remoteAddress;

    // Object for encoding and decoding the network protocol
    std::unique_ptr<ImageProtocol> protocol;

    // Outstanding network message that still has to be transferred
    // (offset tracks partially sent TCP messages)
    int currentMsgLen;
    int currentMsgOffset;
    const unsigned char* currentMsg;

    // Socket configuration
    void setSocketOptions();

    // Network socket initialization
    void initTcpServer(const addrinfo* addressInfo);
    void initTcpClient(const addrinfo* addressInfo);
    void initUdp(const addrinfo* addressInfo);

    // Data reception
    bool receiveNetworkData(bool block);

    // Data transmission
    bool sendNetworkMessage(const unsigned char* msg, int length);
    void sendPendingControlMessages();

    bool selectSocket(bool read, bool wait);
};
/******************** Stubs for all public members ********************/
// Every public ImageTransfer member is a thin forwarder to the Pimpl object.
ImageTransfer::ImageTransfer(const char* address, const char* service,
        ImageProtocol::ProtocolType protType, bool server, int bufferSize, int maxUdpPacketSize):
    pimpl(new Pimpl(address, service, protType, server, bufferSize, maxUdpPacketSize)) {
    // All initialization in the pimpl class
}

// Convenience constructor: derives address/protocol from an enumerated device
// and connects as a client on the standard image data port 7681.
ImageTransfer::ImageTransfer(const DeviceInfo& device, int bufferSize, int maxUdpPacketSize):
    pimpl(new Pimpl(device.getIpAddress().c_str(), "7681", static_cast<ImageProtocol::ProtocolType>(device.getNetworkProtocol()),
    false, bufferSize, maxUdpPacketSize)) {
    // All initialization in the pimpl class
}

ImageTransfer::~ImageTransfer() {
    delete pimpl;
}

void ImageTransfer::setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& rawData,
        int firstTileWidth, int secondTileWidth, int validBytes) {
    pimpl->setRawTransferData(metaData, rawData, firstTileWidth, secondTileWidth, validBytes);
}

void ImageTransfer::setRawValidBytes(const std::vector<int>& validBytes) {
    pimpl->setRawValidBytes(validBytes);
}

void ImageTransfer::setTransferImageSet(const ImageSet& imageSet) {
    pimpl->setTransferImageSet(imageSet);
}

ImageTransfer::TransferStatus ImageTransfer::transferData() {
    return pimpl->transferData();
}

bool ImageTransfer::receiveImageSet(ImageSet& imageSet) {
    return pimpl->receiveImageSet(imageSet);
}

bool ImageTransfer::receivePartialImageSet(ImageSet& imageSet, int& validRows, bool& complete) {
    return pimpl->receivePartialImageSet(imageSet, validRows, complete);
}

int ImageTransfer::getNumDroppedFrames() const {
    return pimpl->getNumDroppedFrames();
}

bool ImageTransfer::isConnected() const {
    return pimpl->isConnected();
}

void ImageTransfer::disconnect() {
    pimpl->disconnect();
}

std::string ImageTransfer::getRemoteAddress() const {
    return pimpl->getRemoteAddress();
}

bool ImageTransfer::tryAccept() {
    return pimpl->tryAccept();
}
/******************** Implementation in pimpl class *******************/
// Resolves the target address and opens the socket(s) for the selected
// protocol/mode. Throws (via the init helpers) on resolution or socket errors.
ImageTransfer::Pimpl::Pimpl(const char* address, const char* service,
        ImageProtocol::ProtocolType protType, bool server, int
        bufferSize, int maxUdpPacketSize)
        : protType(protType), isServer(server), bufferSize(bufferSize),
        maxUdpPacketSize(maxUdpPacketSize),
        clientSocket(INVALID_SOCKET), tcpServerSocket(INVALID_SOCKET),
        currentMsgLen(0), currentMsgOffset(0), currentMsg(nullptr) {

    Networking::initNetworking();
#ifndef _WIN32
    // We don't want to be interrupted by the pipe signal
    signal(SIGPIPE, SIG_IGN);
#endif

    memset(&remoteAddress, 0, sizeof(remoteAddress));

    // If address is null we use the any address
    if(address == nullptr || string(address) == "") {
        address = "0.0.0.0";
    }

    addrinfo* addressInfo = Networking::resolveAddress(address, service);

    try {
        if(protType == ImageProtocol::PROTOCOL_UDP) {
            initUdp(addressInfo);
        } else if(protType == ImageProtocol::PROTOCOL_TCP && isServer) {
            initTcpServer(addressInfo);
        } else {
            initTcpClient(addressInfo);
        }
    } catch(...) {
        // Release the resolved address even when initialization fails
        freeaddrinfo(addressInfo);
        throw;
    }

    if(addressInfo != nullptr) {
        freeaddrinfo(addressInfo);
    }
}
// Closes any sockets that are still open; closeSocket() also performs a
// graceful shutdown for TCP connections.
ImageTransfer::Pimpl::~Pimpl() {
    if(clientSocket != INVALID_SOCKET) {
        Networking::closeSocket(clientSocket);
    }
    if(tcpServerSocket != INVALID_SOCKET) {
        Networking::closeSocket(tcpServerSocket);
    }
}
// Connects to a remote TCP server and records its address. Throws on
// connection failure (via connectTcpSocket).
void ImageTransfer::Pimpl::initTcpClient(const addrinfo* addressInfo) {
    protocol.reset(new ImageProtocol(isServer, ImageProtocol::PROTOCOL_TCP));
    clientSocket = Networking::connectTcpSocket(addressInfo);
    memcpy(&remoteAddress, addressInfo->ai_addr, sizeof(remoteAddress));

    // Set special socket options
    setSocketOptions();
}
// Opens a non-blocking TCP listening socket; client connections are
// accepted later through tryAccept().
void ImageTransfer::Pimpl::initTcpServer(const addrinfo* addressInfo) {
    protocol.reset(new ImageProtocol(isServer, ImageProtocol::PROTOCOL_TCP));

    // Create socket
    tcpServerSocket = ::socket(addressInfo->ai_family, addressInfo->ai_socktype,
        addressInfo->ai_protocol);
    if (tcpServerSocket == INVALID_SOCKET)  {
        TransferException ex("Error opening socket: " + string(strerror(errno)));
        throw ex;
    }

    // Enable reuse address
    Networking::enableReuseAddress(tcpServerSocket, true);

    // Open a server port
    Networking::bindSocket(tcpServerSocket, addressInfo);
    clientSocket = INVALID_SOCKET;

    // Make the server socket non-blocking
    Networking::setSocketBlocking(tcpServerSocket, false);

    // Listen on port; backlog of 1 — only a single client is served at a time
    listen(tcpServerSocket, 1);
}
// Creates the UDP socket. Servers bind to the given local address; clients
// just remember the remote address for sendto().
void ImageTransfer::Pimpl::initUdp(const addrinfo* addressInfo) {
    protocol.reset(new ImageProtocol(isServer, ImageProtocol::PROTOCOL_UDP, maxUdpPacketSize));
    // Create sockets
    clientSocket = socket(AF_INET, SOCK_DGRAM, 0);
    if(clientSocket == INVALID_SOCKET) {
        TransferException ex("Error creating receive socket: " + string(strerror(errno)));
        throw ex;
    }

    // Enable reuse address
    Networking::enableReuseAddress(clientSocket, true);

    // Bind socket to port
    if(isServer && addressInfo != nullptr) {
        Networking::bindSocket(clientSocket, addressInfo);
    }

    if(!isServer) {
        memcpy(&remoteAddress, addressInfo->ai_addr, sizeof(remoteAddress));
    }

    // Set special socket options
    setSocketOptions();
}
// Non-blocking accept for TCP server mode. Replaces any existing client
// connection and resets the protocol state. Returns true if a client connected.
bool ImageTransfer::Pimpl::tryAccept() {
    if(protType != ImageProtocol::PROTOCOL_TCP || ! isServer) {
        throw TransferException("Connections can only be accepted in tcp server mode");
    }

    // Lock both directions: accepting swaps the socket used by send and receive
    unique_lock<recursive_mutex> recvLock(receiveMutex);
    unique_lock<recursive_mutex> sendLock(sendMutex);

    // Accept one connection
    SOCKET newSocket = Networking::acceptConnection(tcpServerSocket, remoteAddress);
    if(newSocket == INVALID_SOCKET) {
        // No connection
        return false;
    }

    // Drop the previous client, if any, in favor of the new one
    if(clientSocket != INVALID_SOCKET) {
        Networking::closeSocket(clientSocket);
    }
    clientSocket = newSocket;

    // Set special socket options
    setSocketOptions();

    // Reset connection data
    protocol->resetTransfer();
    protocol->resetReception();
    currentMsg = nullptr;

    return true;
}
// Returns "ip:port" of the current remote host, or "" if not connected.
std::string ImageTransfer::Pimpl::getRemoteAddress() const {
    unique_lock<recursive_mutex> lock(const_cast<recursive_mutex&>(sendMutex)); // either mutex will work

    if(remoteAddress.sin_family != AF_INET) {
        return "";
    }

    char strPort[11];
    // sin_port is stored in network byte order; convert to host order
    // before formatting (previously printed the raw byte-swapped value)
    snprintf(strPort, sizeof(strPort), ":%d", ntohs(remoteAddress.sin_port));

    return string(inet_ntoa(remoteAddress.sin_addr)) + strPort;
}
// Applies buffer sizes, a 500 ms send/receive timeout and blocking mode to
// the data socket. Called after every (re-)connect.
void ImageTransfer::Pimpl::setSocketOptions() {
    // Set the socket buffer sizes
    if(bufferSize > 0) {
        setsockopt(clientSocket, SOL_SOCKET, SO_RCVBUF, reinterpret_cast<char*>(&bufferSize), sizeof(bufferSize));
        setsockopt(clientSocket, SOL_SOCKET, SO_SNDBUF, reinterpret_cast<char*>(&bufferSize), sizeof(bufferSize));
    }

    // Blocking I/O with a timeout, so calls return periodically even
    // when no data flows
    Networking::setSocketTimeout(clientSocket, 500);
    Networking::setSocketBlocking(clientSocket, true);
}
// The three setters below hand new outgoing data to the protocol codec.
// Resetting currentMsg discards any partially sent message of the
// previous transfer.
void ImageTransfer::Pimpl::setRawTransferData(const ImageSet& metaData,
        const std::vector<unsigned char*>& rawDataVec, int firstTileWidth, int secondTileWidth, int validBytes) {
    unique_lock<recursive_mutex> sendLock(sendMutex);
    protocol->setRawTransferData(metaData, rawDataVec, firstTileWidth, secondTileWidth, validBytes);
    currentMsg = nullptr;
}

void ImageTransfer::Pimpl::setRawValidBytes(const std::vector<int>& validBytes) {
    unique_lock<recursive_mutex> sendLock(sendMutex);
    protocol->setRawValidBytes(validBytes);
}

void ImageTransfer::Pimpl::setTransferImageSet(const ImageSet& imageSet) {
    unique_lock<recursive_mutex> sendLock(sendMutex);
    protocol->setTransferImageSet(imageSet);
    currentMsg = nullptr;
}
// Performs one (possibly partial) transmission step. Pulls messages from the
// protocol codec and pushes them onto the socket until the codec runs dry or
// the socket would block. See TransferStatus for the possible outcomes.
ImageTransfer::TransferStatus ImageTransfer::Pimpl::transferData() {
    unique_lock<recursive_mutex> lock(sendMutex);

    // First receive data in case a control message arrives
    if(protType == ImageProtocol::PROTOCOL_UDP) {
        receiveNetworkData(false);
    }

    if(remoteAddress.sin_family != AF_INET || !protocol->isConnected()) {
        return NOT_CONNECTED;
    }

#ifndef _WIN32
    // Cork TCP to prevent sending of small packets
    if(protType == ImageProtocol::PROTOCOL_TCP) {
        int flag = 1;
        setsockopt(clientSocket, IPPROTO_TCP, TCP_CORK, (char *) &flag, sizeof(int));
    }
#endif

    // Get first message to transfer. currentMsg may still hold a message
    // that was only partially sent in a previous call.
    if(currentMsg == nullptr) {
        currentMsgOffset = 0;
        currentMsg = protocol->getTransferMessage(currentMsgLen);

        if(currentMsg == nullptr) {
            if(protocol->transferComplete()) {
                return ALL_TRANSFERRED;
            } else {
                return NO_VALID_DATA;
            }
        }
    }

    // Try transferring messages
    bool dataTransferred = (currentMsg != nullptr);
    while(currentMsg != nullptr) {
        int writing = (int)(currentMsgLen - currentMsgOffset);

        if(sendNetworkMessage(&currentMsg[currentMsgOffset], writing)) {
            // Get next message
            currentMsgOffset = 0;
            currentMsg = protocol->getTransferMessage(currentMsgLen);
        } else {
            // Socket not ready; sendNetworkMessage has updated
            // currentMsgOffset for a partial TCP write
            return WOULD_BLOCK;
        }
    }

    if(dataTransferred && protType == ImageProtocol::PROTOCOL_TCP && protocol->transferComplete()) {
#ifndef _WIN32
        // Uncork - sends the assembled messages
        int flag = 0;
        setsockopt(clientSocket, IPPROTO_TCP, TCP_CORK, (char *) &flag, sizeof(int));
#else
        // Force a flush for TCP by turning the nagle algorithm off and on
        int flag = 1;
        setsockopt(clientSocket, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int));
        flag = 0;
        setsockopt(clientSocket, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int));
#endif
    }

    // Also check for control messages at the end
    if(protType == ImageProtocol::PROTOCOL_UDP) {
        receiveNetworkData(false);
    }

    if(protocol->transferComplete()) {
        return ALL_TRANSFERRED;
    } else {
        return PARTIAL_TRANSFER;
    }
}
// Blocking receive of a complete image set. Loops over partial receives and
// gives up after an overall deadline of 1 second.
bool ImageTransfer::Pimpl::receiveImageSet(ImageSet& imageSet) {
    int validRows = 0;
    bool complete = false;

    std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now();

    while(!complete) {
        if(!receivePartialImageSet(imageSet, validRows, complete)) {
            return false;
        }

        unsigned int time = static_cast<unsigned int>(std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - startTime).count());
        if(time > 1000) {
            // Overall timeout exceeded without a complete image set
            return false;
        }
    }

    return true;
}
// Receives as much image data as is currently available and exposes the
// partially filled image set. Only the first network read may block.
bool ImageTransfer::Pimpl::receivePartialImageSet(ImageSet& imageSet,
        int& validRows, bool& complete) {
    unique_lock<recursive_mutex> lock(receiveMutex);

    // Try to receive further image data if needed
    bool block = true;
    while(!protocol->imagesReceived() && receiveNetworkData(block)) {
        // Subsequent reads are non-blocking to drain what is queued
        block = false;
    }

    // Get received image
    return protocol->getPartiallyReceivedImageSet(imageSet, validRows, complete);
}
// Receives and processes one chunk of network data. If block is false the
// call returns immediately when the lock or the socket is not ready.
// Returns true only when payload bytes were received and processed.
bool ImageTransfer::Pimpl::receiveNetworkData(bool block) {
    unique_lock<recursive_mutex> lock = block ?
        unique_lock<recursive_mutex>(receiveMutex) : unique_lock<recursive_mutex>(receiveMutex, std::try_to_lock);

    if(clientSocket == INVALID_SOCKET) {
        return false; // Not connected
    }

    // First send control messages if necessary
    sendPendingControlMessages();

    if(!lock.owns_lock()) {
        // Waiting for the lock would block this call
        return false;
    }

    // Test if the socket has data available
    if(!block && !selectSocket(true, false)) {
        return false; // was `return 0;` — function returns bool
    }

    int maxLength = 0;
    char* buffer = reinterpret_cast<char*>(protocol->getNextReceiveBuffer(maxLength));

    // Receive data
    sockaddr_in fromAddress;
    socklen_t fromSize = sizeof(fromAddress);

    int bytesReceived = recvfrom(clientSocket, buffer, maxLength,
        0, reinterpret_cast<sockaddr*>(&fromAddress), &fromSize);

    // NOTE(review): the WSA* constants are assumed to be mapped to POSIX
    // equivalents by networking.h on non-Windows platforms — verify there.
    if(bytesReceived == 0 || (protType == ImageProtocol::PROTOCOL_TCP && bytesReceived < 0 && errno == WSAECONNRESET)) {
        // Connection closed
        disconnect();
    } else if(bytesReceived < 0 && errno != EWOULDBLOCK && errno != EINTR &&
            errno != ETIMEDOUT && errno != WSA_IO_PENDING && errno != WSAECONNRESET) {
        TransferException ex("Error reading from socket: " + string(strerror(errno)));
        throw ex;
    } else if(bytesReceived > 0) {
        protocol->processReceivedMessage(bytesReceived);
        if(protocol->newClientConnected()) {
            // We have just established a new connection
            memcpy(&remoteAddress, &fromAddress, sizeof(remoteAddress));
        }
    }

    return bytesReceived > 0;
}
void ImageTransfer::Pimpl::disconnect() {
    // We just need to forget the remote address in order to
    // disconnect
    unique_lock<recursive_mutex> recvLock(receiveMutex);
    unique_lock<recursive_mutex> sendLock(sendMutex);

    // For TCP the socket itself is also closed; for UDP it stays open
    // so a new peer can connect
    if(clientSocket != INVALID_SOCKET && protType == ImageProtocol::PROTOCOL_TCP) {
        Networking::closeSocket(clientSocket);
    }
    memset(&remoteAddress, 0, sizeof(remoteAddress));
}
// Connected means: we know a remote peer AND the protocol-level
// handshake state agrees.
bool ImageTransfer::Pimpl::isConnected() const {
    unique_lock<recursive_mutex> lock(const_cast<recursive_mutex&>(sendMutex)); //either mutex will work
    return remoteAddress.sin_family == AF_INET && protocol->isConnected();
}
// Sends one message over UDP (sendto) or TCP (send). Returns true when the
// full message went out; false when the socket would block or the peer is
// gone. Partial TCP writes advance currentMsgOffset for a later retry.
bool ImageTransfer::Pimpl::sendNetworkMessage(const unsigned char* msg, int length) {
    int written = 0;
    if(protType == ImageProtocol::PROTOCOL_UDP) {
        sockaddr_in destAddr;
        SOCKET destSocket;
        {
            // Copy the destination under the lock, then send without it
            unique_lock<recursive_mutex> lock(sendMutex);
            destAddr = remoteAddress;
            destSocket = clientSocket;
        }

        if(destAddr.sin_family != AF_INET) {
            return false; // Not connected
        }

        written = sendto(destSocket, reinterpret_cast<const char*>(msg), length, 0,
            reinterpret_cast<sockaddr*>(&destAddr), sizeof(destAddr));
    } else {
        SOCKET destSocket;
        {
            unique_lock<recursive_mutex> lock(sendMutex);
            destSocket = clientSocket;
        }
        written = send(destSocket, reinterpret_cast<const char*>(msg), length, 0);
    }

    // Capture errno immediately after the send call
    unsigned long sendError = errno;

    if(written < 0) {
        if(sendError == EAGAIN || sendError == EWOULDBLOCK || sendError == ETIMEDOUT) {
            // The socket is not yet ready for a new transfer
            return false;
        } else if(sendError == EPIPE) {
            // The connection has been closed
            disconnect();
            return false;
        } else {
            TransferException ex("Error sending network packet: " + string(strerror(sendError)));
            throw ex;
        }
    } else if(written != length) {
        if(protType == ImageProtocol::PROTOCOL_UDP) {
            // The message has been transmitted partially
            throw TransferException("Unable to transmit complete UDP message");
        } else {
            // For TCP we can transmit the remaining data later
            currentMsgOffset += written;
            return false;
        }
    } else {
        return true;
    }
}
// Drains the protocol codec's queue of control messages (ACKs, heartbeats,
// etc. — whatever getNextControlMessage yields) and sends each one.
void ImageTransfer::Pimpl::sendPendingControlMessages() {
    const unsigned char* controlMsgData = nullptr;
    int controlMsgLen = 0;

    while(true) {
        unique_lock<recursive_mutex> lock(sendMutex);
        if(remoteAddress.sin_family != AF_INET) {
            return;
        }

        controlMsgData = protocol->getNextControlMessage(controlMsgLen);

        if(controlMsgData != nullptr) {
            sendNetworkMessage(controlMsgData, controlMsgLen);
        } else {
            break;
        }
    }
}
// Forwarded straight from the protocol codec's drop counter.
int ImageTransfer::Pimpl::getNumDroppedFrames() const {
    return protocol->getNumDroppedFrames();
}
// Checks whether the data socket is ready.
// \param read true: check readability; false: check writability
// \param wait true: wait up to 100 ms for readiness; false: poll and return
// \return true if the requested readiness event was reported
bool ImageTransfer::Pimpl::selectSocket(bool read, bool wait) {
    SOCKET sock;
    {
        unique_lock<recursive_mutex> lock(sendMutex); // Either mutex will do
        sock = clientSocket;
    }
#ifdef _WIN32
    fd_set fds;
    struct timeval tv;
    FD_ZERO(&fds);
    FD_SET(sock, &fds);
    tv.tv_sec = 0;
    if(wait) {
        tv.tv_usec = 100000;
    } else {
        tv.tv_usec = 0;
    }

    if(select(sock+1, (read ? &fds : nullptr), (!read ? &fds : nullptr), nullptr, &tv) <= 0) {
        // The socket is currently not ready
        return false;
    }
#else
    // use poll() on non-Windows platform (glibc select() limitations)
    constexpr int timeoutMillisec = 100;
    pollfd pfd;
    pfd.fd = sock;
    // Fix: honor the 'read' parameter like the select() branch does.
    // Previously POLLIN was requested unconditionally, so write-readiness
    // checks incorrectly polled for readability.
    pfd.events = read ? POLLIN : POLLOUT;
    if (poll(&pfd, 1, wait ? timeoutMillisec: 0) <= 0) {
        // The socket is currently not ready
        return false;
    }
#endif
    // select (or poll) reported an event
    return true;
}
// Human-readable status string, produced by the protocol codec.
std::string ImageTransfer::statusReport() {
    return pimpl->statusReport();
}

std::string ImageTransfer::Pimpl::statusReport() {
    return protocol->statusReport();
}
} // namespace

View file

@ -0,0 +1,232 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_IMAGETRANSFER_H
#define VISIONTRANSFER_IMAGETRANSFER_H
#include <string>
#include "visiontransfer/common.h"
#include "visiontransfer/imageprotocol.h"
#include "visiontransfer/imageset.h"
#include "visiontransfer/deviceinfo.h"
namespace visiontransfer {
/**
* \brief Class for synchronous transfer of image sets.
*
* This class opens a network socket for delivering or receiving image sets. All
* operations are performed synchronously, which means that they might block.
* The class encapsulates ImageProtocol.
*
* This class is thread safe for as long as sending and receiving data
* each has its dedicated thread.
*/
class VT_EXPORT ImageTransfer {
public:
/// The result of a partial image transfer
enum TransferStatus {
/// The image set has been transferred completely.
ALL_TRANSFERRED,
/// The image set has been transferred partially. Further
/// transfers are necessary.
PARTIAL_TRANSFER,
/// There is currently no more data that could be transmitted.
NO_VALID_DATA,
        /// The operation would block and blocking has been disabled.
WOULD_BLOCK,
/// No network connection has been established
NOT_CONNECTED
};
/**
* \brief Creates a new transfer object by manually specifying the
* target address.
*
* \param address Address of the remote host to which a connection
* should be established. In server mode this can be a local
* interface address or NULL.
* \param service The port number that should be used as string or
* as textual service name.
* \param protType Specifies whether the UDP or TCP transport protocol
* shall be used.
* \param server If set to true, this object will be a communication server.
* \param bufferSize Buffer size for sending / receiving network data.
* \param maxUdpPacketSize Maximum allowed size of a UDP packet when sending data.
*/
ImageTransfer(const char* address, const char* service = "7681",
ImageProtocol::ProtocolType protType = ImageProtocol::PROTOCOL_UDP,
bool server = false, int bufferSize = 1048576, int maxUdpPacketSize = 1472);
/**
* \brief Creates a new transfer object by using the device information
* from device enumeration.
*
* \param device Information on the device to which a connection should
* be established.
* \param bufferSize Buffer size for sending / receiving network data.
* \param maxUdpPacketSize Maximum allowed size of a UDP packet when sending data.
*/
ImageTransfer(const DeviceInfo& device, int bufferSize = 1048576,
int maxUdpPacketSize = 1472);
~ImageTransfer();
/**
* \brief Sets the raw pixel data for a partial image transmission.
*
* This method has to be used in conjunction with transferData().
* Please see ImageProtocol::setRawTransferData() for further details.
*/
void setRawTransferData(const ImageSet& metaData, const std::vector<unsigned char*>& rawData,
int firstTileWidth = 0, int secondTileWidth = 0, int validBytes = 0x7FFFFFFF);
/**
* \brief Updates the number of valid bytes in a partial raw transmission.
*
* Please see ImageProtocol::setRawValidBytes() for further details.
*/
void setRawValidBytes(const std::vector<int>& validBytes);
/**
* \brief Sets a new image set that shall be transmitted.
*
* \param imageSet The image set that shall be transmitted.
*
* After setting the image set, subsequent calls to transferData()
* are necessary for performing the image transmission.
*
* \see ImageProtocol::setTransferImageSet()
*/
void setTransferImageSet(const ImageSet& imageSet);
/**
* \brief Performs a partial (or full) image transmission.
*
* \return Status of the transmission. See below.
*
* The method transfers up to the specified number of valid bytes. It has to
* be called in cycles in order to transfer a full image set. If there
* is no more data to be transferred, it will return TransferStatus::NO_VALID_DATA.
*
     * If the transfer is complete, the method will return
     * TransferStatus::ALL_TRANSFERRED. If there remains outstanding data for
     * this transfer, the return value will be TransferStatus::PARTIAL_TRANSFER.
     * If the connection is no longer open, TransferStatus::NOT_CONNECTED
     * is returned.
*
* Even after a complete transfer this method should be continued to be
     * called in case a packet needs to be retransmitted due to an unreliable
* network connection. Also for a communication server this method should
* be called frequently to accept incoming connections.
*/
TransferStatus transferData();
/**
* \brief Waits for and receives a new image set.
*
* \param imageSet Will be set to the received image set.
* \return Returns true if a new image set has been received. Otherwise
* false.
*
* The received image set is only valid until the next call of receiveImageSet().
* The method will not block indefinitely, but return after a short timeout.
*
* \see ImageProtocol::getReceivedImageSet()
*/
bool receiveImageSet(ImageSet& imageSet);
#ifndef DOXYGEN_SHOULD_SKIP_THIS
DEPRECATED("Use receiveImageSet() instead")
inline bool receiveImagePair(ImageSet& imageSet) {
return receiveImageSet(imageSet);
}
#endif
/**
* \brief Returns the received image set, even if it is not yet complete.
*
* The received image set is only valid until calling receivePartialImageSet()
* for the first time after the current image set has been received completely.
* The method returns false if no image data has been received.
*
* Please see ImageProtocol::getPartiallyReceivedImageSet() for further details.
*/
bool receivePartialImageSet(ImageSet& imageSet, int& validRows, bool& complete);
#ifndef DOXYGEN_SHOULD_SKIP_THIS
DEPRECATED("Use receivePartialImageSet() instead")
inline bool receivePartialImagePair(ImageSet& imageSet, int& validRows, bool& complete) {
return receivePartialImageSet(imageSet, validRows, complete);
}
#endif
/**
* \brief Returns the number of frames that have been dropped since
* connecting to the current remote host.
*
* Dropped frames are caused by dropped packets due to a poor network
* connection
*/
int getNumDroppedFrames() const;
/**
* \brief Tries to accept a client connection.
*
* \return True if a client has connected.
*
* This method can only be used in TCP server mode. It shall be called in
* regular intervals to allow for client connections. The method is
* always non-blocking.
*/
bool tryAccept();
/**
* \brief Returns true if a remote connection is established
*/
bool isConnected() const;
/**
* \brief Terminates the current connection.
*
* If connected to a remote host this connection will be closed.
*/
void disconnect();
/**
* \brief Returns the address of the remote host
*
* \return Remote address or "" if no connection has been established.
*/
std::string getRemoteAddress() const;
std::string statusReport();
private:
// We follow the pimpl idiom
class Pimpl;
Pimpl* pimpl;
// This class cannot be copied
ImageTransfer(const ImageTransfer& other);
ImageTransfer& operator=(const ImageTransfer&);
};
} // namespace
#endif

View file

@ -0,0 +1,26 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include "visiontransfer/internalinformation.h"
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
const char InternalInformation::DISCOVERY_BROADCAST_MSG[16] = "NERIAN-DISCOVER";
}} // namespace

View file

@ -0,0 +1,54 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_INTERNALINFORMATION_H
#define VISIONTRANSFER_INTERNALINFORMATION_H
namespace visiontransfer {
namespace internal {
/**
* \brief Information that is required internally by different program
* parts.
*/
struct InternalInformation {
#pragma pack(push,1)
    // Wire format of a discovery reply; packed so the in-memory layout
    // matches the network layout byte for byte.
    struct DiscoveryMessageBasic {
        unsigned char protocolVersion;
        unsigned char model;
        unsigned char useTcp;
        char firmwareVersion[14];
    };
    struct DiscoveryMessage: public DiscoveryMessageBasic {
        // Extended device status / health info
        double lastFps; // Most recent FPS report, or 0.0 if N/A
        unsigned int jumboSize; // Jumbo MTU or 0 if disabled
        char currentCaptureSource[8]; // For targeted debug instructions
    };
#pragma pack(pop)
    // Magic discovery payload; defined in internalinformation.cc
    static const char DISCOVERY_BROADCAST_MSG[16];

    // Fixed port assignments for the individual device services
    static constexpr int DISCOVERY_BROADCAST_PORT = 7680;
    static constexpr int IMAGEDATA_PORT = 7681;
    static constexpr int WEBSOCKET_PORT = 7682;
    static constexpr int PARAMETER_PORT = 7683;
    static constexpr int DATACHANNELSERVICE_PORT = 7684;

    // Version byte advertised in DiscoveryMessageBasic::protocolVersion
    static constexpr unsigned char CURRENT_PROTOCOL_VERSION = 0x06;
};
}} // namespace
#endif

View file

@ -0,0 +1,158 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include "visiontransfer/networking.h"
#include "visiontransfer/exceptions.h"
#include <cstring>
#include <fcntl.h>
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
// Performs one-time network stack initialization. Only Windows needs any
// work (WinSock startup); on other platforms this is a no-op.
void Networking::initNetworking() {
#ifdef _WIN32
    // In windows, we first have to initialize winsock
    WSADATA wsaData;
    if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) {
        throw TransferException("WSAStartup failed!");
    }
#endif
}
addrinfo* Networking::resolveAddress(const char* address, const char* service) {
addrinfo hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_INET; // Use IPv4
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = 0;
hints.ai_protocol = 0;
addrinfo* addressInfo = nullptr;
if(getaddrinfo(address, service, &hints, &addressInfo) != 0 || addressInfo == nullptr) {
TransferException ex("Error resolving address: " + string(strerror(errno)));
throw ex;
}
if(addressInfo->ai_addrlen != sizeof(sockaddr_in)) {
throw TransferException("Illegal address length");
}
return addressInfo;
}
// Creates a TCP socket and connects it to the given address.
// \return the connected socket
// \throws TransferException on socket creation or connection failure
SOCKET Networking::connectTcpSocket(const addrinfo* address) {
    SOCKET sock = ::socket(address->ai_family, address->ai_socktype,
        address->ai_protocol);
    if(sock == INVALID_SOCKET) {
        TransferException ex("Error creating socket: " + string(strerror(errno)));
        throw ex;
    }

    if(connect(sock, address->ai_addr, static_cast<int>(address->ai_addrlen)) < 0) {
        // Build the message first (closeSocket may clobber errno), then
        // release the descriptor — it was previously leaked on this path
        TransferException ex("Error connecting to destination address: " + string(strerror(errno)));
        closeSocket(sock);
        throw ex;
    }

    return sock;
}
// Applies a send and receive timeout to the socket.
// \param timeoutMillisec timeout in milliseconds
void Networking::setSocketTimeout(SOCKET socket, int timeoutMillisec) {
#ifdef _WIN32
    // Windows takes a plain millisecond value
    unsigned int timeout = timeoutMillisec;
#else
    // POSIX takes a timeval; tv_usec holds only the sub-second remainder.
    // (Previously the full millisecond value was scaled into tv_usec,
    // producing an invalid timeval >= 1,000,000 µs for timeouts >= 1 s.)
    struct timeval timeout;
    timeout.tv_sec = timeoutMillisec / 1000;
    timeout.tv_usec = (timeoutMillisec % 1000) * 1000;
#endif

    setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, reinterpret_cast<char*>(&timeout), sizeof(timeout));
    setsockopt(socket, SOL_SOCKET, SO_SNDTIMEO, reinterpret_cast<char*>(&timeout), sizeof(timeout));
}
// Gracefully closes a socket: stop sending, drain a limited amount of
// pending input, then close. The handle is reset to INVALID_SOCKET.
void Networking::closeSocket(SOCKET& socket) {
    // Non-blocking so the drain loop below cannot stall
    setSocketBlocking(socket, false);
    shutdown(socket, SHUT_WR);

    // Receive remaining data (bounded to three reads)
    char buffer[1024];
    for(int i=0; i<3; i++) {
        int received = recv(socket, buffer, sizeof(buffer), 0);
        if(received <= 0) {
            break;
        }
    }

    close(socket);
    socket = INVALID_SOCKET;
}
// Switches a socket between blocking and non-blocking mode.
// On failure to query the current flags (POSIX), the mode is left unchanged.
void Networking::setSocketBlocking(SOCKET socket, bool blocking) {
#ifdef _WIN32
    unsigned long nonBlockingMode = blocking ? 0 : 1;
    ioctlsocket(socket, FIONBIO, &nonBlockingMode);
#else
    int currentFlags = fcntl(socket, F_GETFL, 0);
    if(currentFlags == -1) {
        return; // Could not read the current flags; nothing to update
    }
    int newFlags = blocking ? (currentFlags & ~O_NONBLOCK)
                            : (currentFlags | O_NONBLOCK);
    fcntl(socket, F_SETFL, newFlags);
#endif
}
void Networking::enableReuseAddress(SOCKET socket, bool reuse) {
    // Toggles SO_REUSEADDR so a bound address can be re-used immediately.
    int optionValue = reuse ? 1 : 0;
    setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<char*>(&optionValue), sizeof(int));
}
void Networking::bindSocket(SOCKET socket, const addrinfo* addressInfo) {
    // Binds the socket to the given local address; throws on failure.
    int result = ::bind(socket, addressInfo->ai_addr, static_cast<int>(addressInfo->ai_addrlen));
    if (result < 0) {
        TransferException ex("Error binding socket: " + string(strerror(errno)));
        throw ex;
    }
}
SOCKET Networking::acceptConnection(SOCKET socket, sockaddr_in& remoteAddress) {
    // Accepts one pending connection on a listening socket.
    // Returns INVALID_SOCKET when no connection is waiting (non-blocking /
    // timeout case); throws TransferException on real errors.
    socklen_t clientAddressLength = sizeof(sockaddr_in);

    SOCKET newSocket = accept(socket, reinterpret_cast<sockaddr *>(&remoteAddress),
        &clientAddressLength);

    // BUGFIX: test for accept() failure *before* validating the address
    // length; on failure the length output is meaningless.
    if(newSocket == INVALID_SOCKET) {
        if(errno == EWOULDBLOCK || errno == ETIMEDOUT) {
            // No connection
            return INVALID_SOCKET;
        } else {
            TransferException ex("Error accepting connection: " + string(strerror(errno)));
            throw ex;
        }
    }

    if(clientAddressLength != sizeof(sockaddr_in)) {
        // BUGFIX: close the accepted socket so it is not leaked on throw
        close(newSocket);
        throw TransferException("Received network address with invalid length");
    }

    return newSocket;
}
}} // namespace

View file

@ -0,0 +1,141 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
/*******************************************************************************
* This header file contains include statements and definitions for simplifying
* cross platform network development
*******************************************************************************/
#ifndef VISIONTRANSFER_NETWORKING_H
#define VISIONTRANSFER_NETWORKING_H
// Network headers
#ifdef _WIN32
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x501
#endif
#define _WINSOCK_DEPRECATED_NO_WARNINGS
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <string>
#include <cstdio>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <Ipmib.h>
#include <Iprtrmib.h>
#include <Iphlpapi.h>
// Some defines to make windows socket look more like
// posix sockets.
#ifdef EWOULDBLOCK
#undef EWOULDBLOCK
#endif
#ifdef ECONNRESET
#undef ECONNRESET
#endif
#ifdef ETIMEDOUT
#undef ETIMEDOUT
#endif
#ifdef EPIPE
#undef EPIPE
#endif
#define EWOULDBLOCK WSAEWOULDBLOCK
#define ECONNRESET WSAECONNRESET
#define ETIMEDOUT WSAETIMEDOUT
#define EPIPE WSAECONNABORTED
#define MSG_DONTWAIT 0
#define SHUT_WR SD_BOTH
// Provides a POSIX-style close() wrapper around winsock's closesocket(),
// so the cross-platform code can use a single name.
inline int close(SOCKET s) {
    return closesocket(s);
}
// Emulate posix errno. Does not work in a throw
// statement (WTF?)
#undef errno
#define errno WSAGetLastError()
#define strerror win_strerror
// Visual studio does not come with snprintf
#ifndef snprintf
#define snprintf _snprintf_s
#endif
// Translates a winsock error code into a human-readable message,
// appending the numeric code. Returns "Unknown error" if the system
// cannot format the code.
inline std::string win_strerror(unsigned long error) {
    char* systemMessage = nullptr;
    unsigned long formatResult = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
        nullptr, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
        (LPSTR)&systemMessage, 0, nullptr);
    if(formatResult == 0 || systemMessage == nullptr) {
        return "Unknown error";
    }
    char formatted[512];
    snprintf(formatted, sizeof(formatted), "%s (%lu)", systemMessage, error);
    LocalFree(systemMessage);   // buffer was allocated by FormatMessageA
    return std::string(formatted);
}
typedef int socklen_t;
#else
#include <arpa/inet.h>
#include <netinet/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <netdb.h>
#include <netinet/in.h>
#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <ifaddrs.h>
#include <poll.h>
// Unfortunately we have to use a winsock like socket type
typedef int SOCKET;
#define INVALID_SOCKET -1
// Also we need some additional winsock defines
#define WSA_IO_PENDING 0
#define WSAECONNRESET 0
#endif
namespace visiontransfer {
namespace internal {
/**
* \brief A collection of helper functions for implementing network communication.
*/
class Networking {
public:
    // One-time platform setup (WSAStartup on Windows); throws on failure.
    static void initNetworking();
    // Resolves host/service to an IPv4 TCP addrinfo; caller must freeaddrinfo().
    static addrinfo* resolveAddress(const char* address, const char* service);
    // Creates a TCP socket and connects it to the given address; throws on failure.
    static SOCKET connectTcpSocket(const addrinfo* address);
    // Applies the given send/receive timeout in milliseconds.
    static void setSocketTimeout(SOCKET socket, int timeoutMillisec);
    // Gracefully shuts down and closes the socket, resetting it to INVALID_SOCKET.
    static void closeSocket(SOCKET& socket);
    // Switches the socket between blocking and non-blocking mode.
    static void setSocketBlocking(SOCKET socket, bool blocking);
    // Enables or disables the SO_REUSEADDR socket option.
    static void enableReuseAddress(SOCKET socket, bool reuse);
    // Binds the socket to the given local address; throws on failure.
    static void bindSocket(SOCKET socket, const addrinfo* addressInfo);
    // Accepts a pending connection; returns INVALID_SOCKET if none is waiting.
    static SOCKET acceptConnection(SOCKET socket, sockaddr_in& remoteAddress);
};
}} // namespace
#endif

View file

@ -0,0 +1,154 @@
#include "visiontransfer/parameterinfo.h"
#include "visiontransfer/exceptions.h"
namespace visiontransfer {
// Private implementation: holds one parameter's metadata (name, type,
// writability) plus its value/min/max/increment stored as raw union values.
class ParameterInfo::Pimpl {
public:
    Pimpl(): type(ParameterInfo::TYPE_INT), value({0}), min({0}), max({0}), inc({0}) { }
    // Stores name, writability and the full value range; specialized per
    // supported type (int, double, bool) in the .cpp file.
    template<typename T> void set(const std::string& name, bool writeable,
        T value, T min, T max, T inc);
    inline std::string getName() const { return name; }
    inline ParameterType getType() const { return type; }
    inline bool isWriteable() const { return writeable; }
    // Reads a union member, converting from the stored runtime type to T.
    template<typename T> T getTypedValue(const ParameterValue& val) const;
    template<typename T> T getValue() const { return getTypedValue<T>(value); }
    template<typename T> T getMin() const { return getTypedValue<T>(min); }
    template<typename T> T getMax() const { return getTypedValue<T>(max); }
    template<typename T> T getInc() const { return getTypedValue<T>(inc); }
private:
    std::string name;
    // Runtime tag selecting the active member of the value unions below
    ParameterType type;
    // NOTE(review): not initialized by the default constructor -- confirm
    // set() is always called before isWriteable() is read.
    bool writeable;
    ParameterValue value;
    ParameterValue min;
    ParameterValue max;
    ParameterValue inc;
};
// ParameterInfo, for abstracted enumerations of parameters
// Default-constructs with a TYPE_INT Pimpl whose value fields are zeroed.
// NOTE(review): pimpl is allocated with new but no destructor or copy
// control is visible in this file -- confirm it is released elsewhere,
// otherwise every ParameterInfo leaks its Pimpl.
ParameterInfo::ParameterInfo()
{
    pimpl = new ParameterInfo::Pimpl();
}
// Stores an integer parameter together with its range metadata.
template<> void ParameterInfo::Pimpl::set(const std::string& name, bool writeable, int value, int min, int max, int inc)
{
    this->type = ParameterInfo::TYPE_INT;
    this->name = name;
    this->writeable = writeable;
    this->value.intVal = value;
    this->min.intVal = min;
    this->max.intVal = max;
    this->inc.intVal = inc;
}
// Stores a double-precision parameter together with its range metadata.
template<> void ParameterInfo::Pimpl::set(const std::string& name, bool writeable, double value, double min, double max, double inc)
{
    this->type = ParameterInfo::TYPE_DOUBLE;
    this->name = name;
    this->writeable = writeable;
    this->value.doubleVal = value;
    this->min.doubleVal = min;
    this->max.doubleVal = max;
    this->inc.doubleVal = inc;
}
// Stores a boolean parameter together with its (degenerate) range metadata.
template<> void ParameterInfo::Pimpl::set(const std::string& name, bool writeable, bool value, bool min, bool max, bool inc)
{
    this->type = ParameterInfo::TYPE_BOOL;
    this->name = name;
    this->writeable = writeable;
    this->value.boolVal = value;
    this->min.boolVal = min;
    this->max.boolVal = max;
    this->inc.boolVal = inc;
}
// Factory for an integer-typed parameter description.
ParameterInfo ParameterInfo::fromInt(const std::string& name, bool writeable,
        int value, int min, int max, int inc) {
    ParameterInfo info;
    info.pimpl->set<int>(name, writeable, value, min, max, inc);
    return info;
}
// Factory for a double-typed parameter description.
ParameterInfo ParameterInfo::fromDouble(const std::string& name, bool writeable,
        double value, double min, double max, double inc) {
    ParameterInfo info;
    info.pimpl->set<double>(name, writeable, value, min, max, inc);
    return info;
}
// Factory for a boolean-typed parameter description (range fixed to 0..1).
ParameterInfo ParameterInfo::fromBool(const std::string& name, bool writeable, bool value) {
    ParameterInfo info;
    info.pimpl->set<bool>(name, writeable, value, 0, 1, 1);
    return info;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Converts the stored raw value to int, based on the runtime type tag.
template<> int ParameterInfo::Pimpl::getTypedValue(const ParameterInfo::ParameterValue& val) const {
    if (type == ParameterInfo::TYPE_INT) {
        return val.intVal;
    }
    if (type == ParameterInfo::TYPE_BOOL) {
        return (int) val.boolVal;
    }
    if (type == ParameterInfo::TYPE_DOUBLE) {
        return (int) val.doubleVal;
    }
    throw ParameterException("Unexpected parameter type");
}
// Converts the stored raw value to double, based on the runtime type tag.
template<> double ParameterInfo::Pimpl::getTypedValue(const ParameterInfo::ParameterValue& val) const {
    if (type == ParameterInfo::TYPE_DOUBLE) {
        return val.doubleVal;
    }
    if (type == ParameterInfo::TYPE_INT) {
        return (double) val.intVal;
    }
    if (type == ParameterInfo::TYPE_BOOL) {
        return val.boolVal?1.0:0.0;
    }
    throw ParameterException("Unexpected parameter type");
}
// Converts the stored raw value to bool, based on the runtime type tag.
template<> bool ParameterInfo::Pimpl::getTypedValue(const ParameterInfo::ParameterValue& val) const {
    if (type == ParameterInfo::TYPE_BOOL) {
        return val.boolVal;
    }
    if (type == ParameterInfo::TYPE_DOUBLE) {
        return val.doubleVal != 0.0;
    }
    if (type == ParameterInfo::TYPE_INT) {
        return val.intVal != 0;
    }
    throw ParameterException("Unexpected parameter type");
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
// Public accessors, forwarding to the Pimpl.
std::string ParameterInfo::getName() const { return pimpl->getName(); }
ParameterInfo::ParameterType ParameterInfo::getType() const { return pimpl->getType(); }
bool ParameterInfo::isWriteable() const { return pimpl->isWriteable(); }
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Explicit specializations of the typed accessors for int, double and bool.
// Each forwards to the Pimpl, which converts from the stored type if needed.
template<> VT_EXPORT int ParameterInfo::getValue() const { return pimpl->getValue<int>(); }
template<> VT_EXPORT double ParameterInfo::getValue() const { return pimpl->getValue<double>(); }
template<> VT_EXPORT bool ParameterInfo::getValue() const { return pimpl->getValue<bool>(); }
template<> VT_EXPORT int ParameterInfo::getMin() const { return pimpl->getMin<int>(); }
template<> VT_EXPORT double ParameterInfo::getMin() const { return pimpl->getMin<double>(); }
template<> VT_EXPORT bool ParameterInfo::getMin() const { return pimpl->getMin<bool>(); }
template<> VT_EXPORT int ParameterInfo::getMax() const { return pimpl->getMax<int>(); }
template<> VT_EXPORT double ParameterInfo::getMax() const { return pimpl->getMax<double>(); }
template<> VT_EXPORT bool ParameterInfo::getMax() const { return pimpl->getMax<bool>(); }
template<> VT_EXPORT int ParameterInfo::getInc() const { return pimpl->getInc<int>(); }
template<> VT_EXPORT double ParameterInfo::getInc() const { return pimpl->getInc<double>(); }
template<> VT_EXPORT bool ParameterInfo::getInc() const { return pimpl->getInc<bool>(); }
#endif // DOXYGEN_SHOULD_SKIP_THIS
} // namespace

View file

@ -0,0 +1,86 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_STANDARDPARAMETERS_H
#define VISIONTRANSFER_STANDARDPARAMETERS_H
#include <cstdint>
#include <map>
#include <string>
#include "visiontransfer/common.h"
namespace visiontransfer {
/**
 * \brief Read-only description of one device parameter: name, type,
 * writability, and its value / range, as produced by the parameter
 * enumeration.
 */
class VT_EXPORT ParameterInfo {
public:
    // Raw storage for a value of any supported type; ParameterType
    // selects which member is active.
    union ParameterValue {
        int32_t intVal;
        bool boolVal;
        double doubleVal;
    };
    enum ParameterType {
        TYPE_INT = 1,
        TYPE_DOUBLE = 2,
        TYPE_BOOL = 3,
    };
    ParameterInfo();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
    // For internal use only
    static ParameterInfo fromInt(const std::string& name, bool writeable,
        int value, int min = -1, int max = -1, int inc = -1);
    static ParameterInfo fromDouble(const std::string& name, bool writeable,
        double value, double min = -1, double max = -1, double inc = -1);
    static ParameterInfo fromBool(const std::string& name, bool writeable, bool value);
#endif
    /**
     * \brief Returns the string representation of the parameter name
     */
    std::string getName() const;
    /**
     * \brief Returns the type of the parameter
     */
    ParameterType getType() const;
    /**
     * \brief Returns whether the parameter is writeable (or read-only)
     */
    bool isWriteable() const;
    /**
     * \brief Returns the current parameter value, cast to the desired type (int, double or bool)
     */
    template<typename T> T getValue() const;
    /**
     * \brief Returns the minimum parameter value, cast to the desired type (int, double or bool)
     */
    template<typename T> T getMin() const;
    /**
     * \brief Returns the maximum parameter value, cast to the desired type (int, double or bool)
     */
    template<typename T> T getMax() const;
    /**
     * \brief Returns the increment of the parameter (i.e. increment for raising / lowering the value), cast to the desired type (int, double or bool)
     */
    template<typename T> T getInc() const;
private:
    class Pimpl;
    // NOTE(review): raw owning pointer with no visible destructor or copy
    // control -- confirm lifetime management elsewhere, otherwise copies
    // alias one Pimpl and the allocation leaks.
    Pimpl* pimpl;
};
} // namespace
#endif

View file

@ -0,0 +1,246 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <iostream>
#include "visiontransfer/parametertransfer.h"
#include "visiontransfer/exceptions.h"
#include "visiontransfer/internalinformation.h"
#include "visiontransfer/standardparameterids.h"
#include "visiontransfer/parametertransferdata.h"
#include <cstring>
#include <string>
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
namespace internal {
constexpr int ParameterTransfer::SOCKET_TIMEOUT_MS;
// Establishes the TCP control connection to the parameter server and
// verifies that both sides speak the same protocol version.
// Throws TransferException / ParameterException on failure.
ParameterTransfer::ParameterTransfer(const char* address, const char* service)
    : socket(INVALID_SOCKET) {

    Networking::initNetworking();
    addrinfo* addressInfo = Networking::resolveAddress(address, service);

    try {
        socket = Networking::connectTcpSocket(addressInfo);
        Networking::setSocketTimeout(socket, SOCKET_TIMEOUT_MS);
        checkProtocolVersion();
    } catch(...) {
        // BUGFIX: the destructor does not run when a constructor throws, so
        // the address list and the socket must be released here to avoid a
        // leak when checkProtocolVersion() (or the timeout setup) fails.
        freeaddrinfo(addressInfo);
        if(socket != INVALID_SOCKET) {
            Networking::closeSocket(socket);
        }
        throw;
    }

    freeaddrinfo(addressInfo);
}
// Closes the control connection if it is still open.
ParameterTransfer::~ParameterTransfer() {
    if(socket != INVALID_SOCKET) {
        Networking::closeSocket(socket);
    }
}
// Receives and decodes the parameter enumeration reply: a uint32 parameter
// count followed by that many TransportParameterInfo records. Integral
// fields arrive in network byte order; doubles are copied as-is
// (NOTE(review): assumes matching endianness for doubles -- confirm the
// device protocol). Parameters with unknown IDs or types are skipped.
std::map<std::string, ParameterInfo> ParameterTransfer::recvEnumeration() {
    std::map<std::string, ParameterInfo> pi;
    const size_t bufsize = 4096;
    char buf[bufsize];
    char* recv_buf = buf;

    // Read the 4-byte count header, looping over partial TCP reads
    // (BUGFIX: a short first recv() is legal TCP behavior, not an error).
    size_t headerRemaining = 4;
    while (headerRemaining > 0) {
        int bytesReceived = recv(socket, recv_buf, headerRemaining, 0);
        if(bytesReceived < 0) {
            TransferException ex("Error receiving network packet: " + string(strerror(errno)));
            throw ex;
        } else if (bytesReceived == 0) {
            TransferException ex("Error receiving network packet: connection closed");
            throw ex;
        }
        headerRemaining -= bytesReceived;
        recv_buf += bytesReceived;
    }

    // Number of parameters in the first received uint32
    uint32_t num_params = ntohl(reinterpret_cast<uint32_t*>(buf)[0]);

    // Expected size of following data block, read until met
    size_t expected_remaining_size = num_params * sizeof(TransportParameterInfo);
    if (expected_remaining_size > bufsize - 4) {
        TransferException ex("Remote parameter enumeration exceeds expected maximum size");
        throw ex;
    }
    while (expected_remaining_size > 0) {
        int bytesReceived = recv(socket, recv_buf, expected_remaining_size, 0);
        if (bytesReceived < 0) {
            TransferException ex("Error receiving network packet: " + string(strerror(errno)));
            throw ex;
        } else if (bytesReceived == 0) {
            TransferException ex("Error receiving network packet: connection closed");
            throw ex;
        } else {
            expected_remaining_size -= bytesReceived;
            recv_buf += bytesReceived;
        }
    }

    TransportParameterInfo* tpi = reinterpret_cast<TransportParameterInfo*>(buf + 4);
    for (unsigned int i = 0; i < num_params; ++i) {
        StandardParameterIDs::ParameterID id = (StandardParameterIDs::ParameterID) ntohl(tpi->id);
        ParameterInfo::ParameterType type = (ParameterInfo::ParameterType) ntohl(tpi->type);
        // BUGFIX: convert the flags word to host byte order *before* masking.
        // The previous ntohl(tpi->flags & PARAMETER_WRITEABLE) masked the raw
        // network-order word with a host-order constant, testing the wrong
        // bit on little-endian hosts.
        bool writeable = (ntohl(tpi->flags) & StandardParameterIDs::ParameterFlags::PARAMETER_WRITEABLE) != 0;
        //
        auto nameIt = internal::StandardParameterIDs::parameterNameByID.find(id);
        if (nameIt == StandardParameterIDs::parameterNameByID.end()) {
            std::cerr << "Enumeration contained a ParameterID for which no name is known: " << std::to_string(id) << std::endl;
            std::cerr << "Parameter ignored; please ensure your libvisiontransfer is up to date." << std::endl;
        } else {
            switch(type) {
                case ParameterInfo::TYPE_INT: {
                    pi[nameIt->second] = visiontransfer::ParameterInfo::fromInt(nameIt->second, writeable,
                        ntohl(tpi->value.intVal), ntohl(tpi->min.intVal), ntohl(tpi->max.intVal), ntohl(tpi->inc.intVal)
                    );
                    break;
                }
                case ParameterInfo::TYPE_BOOL: {
                    pi[nameIt->second] = visiontransfer::ParameterInfo::fromBool(nameIt->second, writeable, ntohl(tpi->value.boolVal) != 0);
                    break;
                }
                case ParameterInfo::TYPE_DOUBLE: {
                    pi[nameIt->second] = visiontransfer::ParameterInfo::fromDouble(nameIt->second, writeable,
                        tpi->value.doubleVal, tpi->min.doubleVal, tpi->max.doubleVal, tpi->inc.doubleVal
                    );
                    break;
                }
                default: {
                    // Unknown type: skip this record
                }
            }
        }
        ++tpi;
    }
    return pi;
}
// Receives exactly 'length' bytes into dest, looping over partial TCP
// reads. BUGFIX: the previous single recv() treated a legal partial read
// as a fatal "too short packet" error. Throws TransferException on socket
// errors or if the connection closes before all bytes arrive.
void ParameterTransfer::recvData(unsigned char* dest, int length) {
    int totalReceived = 0;
    while(totalReceived < length) {
        int bytesReceived = recv(socket, reinterpret_cast<char*>(&dest[totalReceived]),
            length - totalReceived, 0);
        if(bytesReceived < 0) {
            TransferException ex("Error receiving network packet: " + string(strerror(errno)));
            throw ex;
        } else if(bytesReceived == 0) {
            // Connection closed mid-message
            throw TransferException("Received too short network packet!");
        }
        totalReceived += bytesReceived;
    }
}
// Reads the server's protocol version and verifies it matches ours;
// throws ParameterException on a mismatch.
void ParameterTransfer::checkProtocolVersion() {
    unsigned int rawVersion = 0;
    recvData(reinterpret_cast<unsigned char*>(&rawVersion), sizeof(rawVersion));
    unsigned int remoteVersion = ntohl(rawVersion);

    if(remoteVersion != static_cast<unsigned int>(InternalInformation::CURRENT_PROTOCOL_VERSION)) {
        throw ParameterException("Protocol version mismatch! Expected "
            + std::to_string(InternalInformation::CURRENT_PROTOCOL_VERSION)
            + " but received " + std::to_string(remoteVersion));
    }
}
// Requests a single parameter value from the server and copies the first
// 'length' bytes of the raw 8-byte reply into dest. The request layout is
// [1 type byte][4-byte network-order id][8 padding bytes].
void ParameterTransfer::readParameter(unsigned char messageType, int32_t id, unsigned char* dest, int length) {
    if(length > 8) {
        throw ParameterException("Parameter type size mismatch!");
    }

    unsigned char request[13];
    memset(request, 0, sizeof(request));
    unsigned int networkId = htonl(id);
    request[0] = messageType;
    memcpy(&request[1], &networkId, 4);

    int written = send(socket, reinterpret_cast<char*>(request), sizeof(request), 0);
    if(written != sizeof(request)) {
        TransferException ex("Error sending parameter read request: " + string(strerror(errno)));
        throw ex;
    }

    unsigned char reply[8];
    recvData(reply, sizeof(reply));
    memcpy(dest, reply, length);
}
// Sends a write request for one parameter and checks the server's status
// reply; an all-zero status word signals failure. The request layout is
// [1 type byte][4-byte network-order id][value bytes, zero-padded to 8].
template<typename T>
void ParameterTransfer::writeParameter(unsigned char messageType, int32_t id, T value) {
    static_assert(sizeof(T) <= 8, "Parameter type musst be smaller or equal to 8 bytes");

    unsigned char request[13];
    memset(request, 0, sizeof(request));
    unsigned int networkId = htonl(id);
    request[0] = messageType;
    memcpy(&request[1], &networkId, 4);
    memcpy(&request[5], &value, sizeof(value));

    int written = send(socket, reinterpret_cast<char*>(request), sizeof(request), 0);
    if(written != sizeof(request)) {
        TransferException ex("Error sending parameter write request: " + string(strerror(errno)));
        throw ex;
    }

    unsigned char reply[8];
    recvData(reply, sizeof(reply));

    bool statusOk = (reply[0] != 0 || reply[1] != 0 || reply[2] != 0 || reply[3] != 0);
    if(!statusOk) {
        throw ParameterException("Unable to write parameter");
    }
}
// Reads an integer parameter; the 4-byte reply is in network byte order.
int ParameterTransfer::readIntParameter(int32_t id) {
    unsigned int data;
    readParameter(MESSAGE_READ_INT, id, reinterpret_cast<unsigned char*>(&data), sizeof(data));
    return static_cast<int>(ntohl(data));
}
// Reads a double parameter; copied as raw bytes with no byte-order
// conversion (NOTE(review): assumes matching endianness -- confirm protocol).
double ParameterTransfer::readDoubleParameter(int32_t id) {
    double data;
    readParameter(MESSAGE_READ_DOUBLE, id, reinterpret_cast<unsigned char*>(&data), sizeof(data));
    return data;
}
// Reads a boolean parameter; any non-zero reply word counts as true.
bool ParameterTransfer::readBoolParameter(int32_t id) {
    unsigned int data;
    readParameter(MESSAGE_READ_BOOL, id, reinterpret_cast<unsigned char*>(&data), sizeof(data));
    return (data != 0);
}
// Writes an integer parameter, converting the value to network byte order.
void ParameterTransfer::writeIntParameter(int32_t id, int32_t value) {
    writeParameter(MESSAGE_WRITE_INT, id, htonl(static_cast<uint32_t>(value)));
}
// Writes a double parameter as raw bytes (see readDoubleParameter note).
void ParameterTransfer::writeDoubleParameter(int32_t id, double value) {
    writeParameter(MESSAGE_WRITE_DOUBLE, id, value);
}
// Writes a boolean parameter encoded as a network-order 32-bit word.
void ParameterTransfer::writeBoolParameter(int32_t id, int32_t value) {
    writeParameter(MESSAGE_WRITE_BOOL, id, htonl(static_cast<uint32_t>(value)));
}
std::map<std::string, ParameterInfo> ParameterTransfer::getAllParameters() {
unsigned char messageBuf[13]; // padded to common message size, payload ignored
memset(messageBuf, 0, sizeof(messageBuf));
messageBuf[0] = MESSAGE_ENUMERATE_PARAMS;
int written = send(socket, reinterpret_cast<char*>(messageBuf), sizeof(messageBuf), 0);
if(written != sizeof(messageBuf)) {
TransferException ex("Error sending parameter enumeration request: " + string(strerror(errno)));
throw ex;
}
auto enumeration = recvEnumeration();
return enumeration;
}
}} // namespace

View file

@ -0,0 +1,155 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_PARAMETERTRANSFER_H
#define VISIONTRANSFER_PARAMETERTRANSFER_H
#include "visiontransfer/networking.h"
#include "visiontransfer/parameterinfo.h"
#include <map>
namespace visiontransfer {
namespace internal {
/**
* \brief Allows a configuration of device parameters over the network.
*
* A TCP connection is established to a parameter server. The protocol
* allows writing and reading of individual parameters, which are
* identified by a unique ID. There are three supported types of
* parameters: integers, double precision floating point values, and
* booleans.
*
* This class is only used internally. Users should use the class
* \ref DeviceParameters instead.
*/
class ParameterTransfer {
public:
    /**
     * \brief Creates an object and connects to the given server.
     *
     * \param address IP address or host name of the server.
     * \param service The port number that should be used as string or
     * as textual service name.
     */
    ParameterTransfer(const char* address, const char* service = "7683");
    // Closes the control connection if it is still open.
    ~ParameterTransfer();
    /**
     * \brief Reads an integer value from the parameter server.
     *
     * \param id Unique ID of the parameter to be read.
     * \return If successful, the value of the parameter that has
     * been read
     *
     * If reading the parameter fails, then an exception of type
     * TransferException or ParameterException is thrown.
     */
    int readIntParameter(int32_t id);
    /**
     * \brief Reads a double precision floating point value from the
     * parameter server.
     *
     * \param id Unique ID of the parameter to be read.
     * \return If successful, the value of the parameter that has
     * been read
     *
     * If reading the parameter fails, then an exception of type
     * TransferException or ParameterException is thrown.
     */
    double readDoubleParameter(int32_t id);
    /**
     * \brief Reads a boolean value from the parameter server.
     *
     * \param id Unique ID of the parameter to be read.
     * \return If successful, the value of the parameter that has
     * been read
     *
     * If reading the parameter fails, then an exception of type
     * TransferException or ParameterException is thrown.
     */
    bool readBoolParameter(int32_t id);
    /**
     * \brief Writes an integer value to a parameter of the parameter
     * server.
     *
     * \param id Unique ID of the parameter to be written.
     * \param value Value that should be written to the parameter.
     *
     * If writing the parameter fails, then an exception of type
     * TransferException or ParameterException is thrown.
     */
    void writeIntParameter(int32_t id, int32_t value);
    /**
     * \brief Writes a double precision floating point value to a
     * parameter of the parameter server.
     *
     * \param id Unique ID of the parameter to be written.
     * \param value Value that should be written to the parameter.
     *
     * If writing the parameter fails, then an exception of type
     * TransferException or ParameterException is thrown.
     */
    void writeDoubleParameter(int32_t id, double value);
    /**
     * \brief Writes a boolean value to a parameter of the parameter
     * server.
     *
     * \param id Unique ID of the parameter to be written.
     * \param value Value that should be written to the parameter.
     *
     * If writing the parameter fails, then an exception of type
     * TransferException or ParameterException is thrown.
     */
    void writeBoolParameter(int32_t id, int32_t value);
    /**
     * \brief Enumerates all parameters as reported by the device.
     */
    std::map<std::string, ParameterInfo> getAllParameters();
private:
    // Send/receive timeout applied to the control connection
    static constexpr int SOCKET_TIMEOUT_MS = 500;
    // Message types (first byte of every 13-byte request message)
    static constexpr unsigned char MESSAGE_READ_INT = 0x01;
    static constexpr unsigned char MESSAGE_READ_DOUBLE = 0x02;
    static constexpr unsigned char MESSAGE_READ_BOOL = 0x03;
    static constexpr unsigned char MESSAGE_WRITE_INT = 0x04;
    static constexpr unsigned char MESSAGE_WRITE_DOUBLE = 0x05;
    static constexpr unsigned char MESSAGE_WRITE_BOOL = 0x06;
    static constexpr unsigned char MESSAGE_ENUMERATE_PARAMS = 0x07;
    // TCP control connection to the parameter server
    SOCKET socket;
    // Verifies that the server speaks our protocol version; throws otherwise.
    void checkProtocolVersion();
    // Sends a read request and copies the raw reply bytes into dest.
    void readParameter(unsigned char messageType, int32_t id, unsigned char* dest, int length);
    // Receives exactly 'length' bytes from the socket.
    void recvData(unsigned char* dest, int length);
    // Sends a write request and checks the server's status reply.
    template<typename T>
    void writeParameter(unsigned char messageType, int32_t id, T value);
    // Receives and decodes the parameter enumeration reply.
    std::map<std::string, ParameterInfo> recvEnumeration();
};
}} // namespace
#endif

View file

@ -0,0 +1,36 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_PARAMETERTRANSFERDATA_H
#define VISIONTRANSFER_PARAMETERTRANSFERDATA_H
namespace visiontransfer {
namespace internal {
#pragma pack(push,1)
// On-wire record for one enumerated parameter (packed, no padding).
// NOTE(review): integral fields appear to be sent in network byte order and
// converted with ntohl() on receipt, while the doubleVal union member is
// copied raw -- confirm against the device-side protocol implementation.
struct TransportParameterInfo {
    uint32_t id = 0;     // StandardParameterIDs::ParameterID
    uint32_t type = 0;   // ParameterInfo::ParameterType
    uint32_t flags = 0;  // StandardParameterIDs::ParameterFlags bit set
    ParameterInfo::ParameterValue value = {0};
    ParameterInfo::ParameterValue min = {0};
    ParameterInfo::ParameterValue max = {0};
    ParameterInfo::ParameterValue inc = {0};
};
#pragma pack(pop)
}} // namespace
#endif

View file

@ -0,0 +1,443 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_PROTOCOL_SH2_IMU_BNO080
#define VISIONTRANSFER_PROTOCOL_SH2_IMU_BNO080
#include <cstdint>
namespace visiontransfer {
namespace internal {
struct SH2Constants {
static constexpr uint8_t CHANNEL_COMMAND = 0;
static constexpr uint8_t CHANNEL_EXECUTABLE = 1;
static constexpr uint8_t CHANNEL_CONTROL = 2;
static constexpr uint8_t CHANNEL_REPORTS = 3;
static constexpr uint8_t CHANNEL_WAKE_REPORTS= 4;
static constexpr uint8_t CHANNEL_GYRO = 5;
static constexpr uint8_t REPORT_COMMAND_RESPONSE = 0xf1;
static constexpr uint8_t REPORT_COMMAND_REQUEST = 0xf2;
static constexpr uint8_t REPORT_FRS_READ_RESPONSE = 0xf3;
static constexpr uint8_t REPORT_FRS_READ_REQUEST = 0xf4;
static constexpr uint8_t REPORT_FRS_WRITE_RESPONSE = 0xf5;
static constexpr uint8_t REPORT_FRS_WRITE_DATA = 0xf6;
static constexpr uint8_t REPORT_FRS_WRITE_REQUEST = 0xf7;
static constexpr uint8_t REPORT_PRODUCT_ID_RESPONSE = 0xf8;
static constexpr uint8_t REPORT_PRODUCT_ID_REQUEST = 0xf9;
static constexpr uint8_t REPORT_TIMESTAMP_REBASE = 0xfa;
static constexpr uint8_t REPORT_BASE_TIMESTAMP_REFERENCE = 0xfb;
static constexpr uint8_t REPORT_GET_FEATURE_RESPONSE = 0xfc;
static constexpr uint8_t REPORT_SET_FEATURE_COMMAND = 0xfd;
static constexpr uint8_t REPORT_GET_FEATURE_REQUEST = 0xfe;
// Commands supported by report 0xf2 / 0xf1
static constexpr uint8_t COMMAND_REPORT_ERRORS = 0x01;
static constexpr uint8_t COMMAND_COUNTS = 0x02;
static constexpr uint8_t COMMAND_TARE = 0x03;
static constexpr uint8_t COMMAND_INITIALIZE = 0x04;
static constexpr uint8_t COMMAND_RESERVED_05 = 0x05;
static constexpr uint8_t COMMAND_SAVE_DCD = 0x06;
static constexpr uint8_t COMMAND_ME_CALIBRATION = 0x07;
static constexpr uint8_t COMMAND_RESERVED_08 = 0x08;
static constexpr uint8_t COMMAND_PERIODIC_DCD_SAVE = 0x09;
static constexpr uint8_t COMMAND_GET_OSCILLATOR_TYPE = 0x0a;
static constexpr uint8_t COMMAND_CLEAR_DCD_AND_RESET = 0x0b;
static constexpr uint8_t COMMAND_CALIBRATION = 0x0c;
static constexpr uint8_t COMMAND_BOOTLOADER = 0x0d;
static constexpr uint8_t COMMAND_INTERACTIVE_CALIBRATION = 0x0e;
// Subcommands, for certain commands only
// DCD / ME / Bootloader not considered yet, here
static constexpr uint8_t SUBCOMMAND_COUNTS__GET_COUNTS = 0x00;
static constexpr uint8_t SUBCOMMAND_COUNTS__CLEAR_COUNTS = 0x01;
static constexpr uint8_t SUBCOMMAND_TARE__TARE_NOW = 0x00;
static constexpr uint8_t SUBCOMMAND_TARE__PERSIST_TARE = 0x01;
static constexpr uint8_t SUBCOMMAND_TARE__SET_REORIENTATION = 0x02;
static constexpr uint8_t SUBCOMMAND_CALIBRATION__START_CALIBRATION = 0x00;
static constexpr uint8_t SUBCOMMAND_CALIBRATION__FINISH_CALIBRATION = 0x01;
// What to tare (can be ORed)
static constexpr uint8_t TARE_AXIS_X = 1;
static constexpr uint8_t TARE_AXIS_Y = 2;
static constexpr uint8_t TARE_AXIS_Z = 4;
// Reference for tare operation
static constexpr uint8_t TARE_BASE_ROTATION_VECTOR = 0;
static constexpr uint8_t TARE_BASE_GAMING_ROTATION_VECTOR = 1;
static constexpr uint8_t TARE_BASE_GEOMAGNETIC_ROTATION_VECTOR = 2;
static constexpr uint8_t TARE_BASE_GYRO_INTEGRATED_ROTATION_VECTOR = 3;
static constexpr uint8_t TARE_BASE_ARVR_STABILIZED_ROTATION_VECTOR = 4;
static constexpr uint8_t TARE_BASE_ARVR_STABILIZED_GAME_ROTATION_VECTOR= 5;
// Sensor types (= sensor input report ID)
static constexpr uint8_t SENSOR_ACCELEROMETER = 0x01;
static constexpr uint8_t SENSOR_GYROSCOPE = 0x02;
static constexpr uint8_t SENSOR_MAGNETOMETER = 0x03;
static constexpr uint8_t SENSOR_LINEAR_ACCELERATION = 0x04;
static constexpr uint8_t SENSOR_ROTATION_VECTOR = 0x05;
static constexpr uint8_t SENSOR_GRAVITY = 0x06;
static constexpr uint8_t SENSOR_GYROSCOPE_UNCALIBRATED = 0x07;
static constexpr uint8_t SENSOR_GAME_ROTATION_VECTOR = 0x08;
static constexpr uint8_t SENSOR_GEOMAGNETIC_ROTATION = 0x09;
static constexpr uint8_t SENSOR_PRESSURE = 0x0a;
static constexpr uint8_t SENSOR_AMBIENT_LIGHT = 0x0b;
static constexpr uint8_t SENSOR_HUMIDITY = 0x0c;
static constexpr uint8_t SENSOR_PROXIMITY = 0x0d;
static constexpr uint8_t SENSOR_TEMPERATURE = 0x0e;
static constexpr uint8_t SENSOR_MAGNETOMETER_UNCALIBRATED = 0x0f;
static constexpr uint8_t SENSOR_TAP_DETECTOR = 0x10;
static constexpr uint8_t SENSOR_STEP_COUNTER = 0x11;
static constexpr uint8_t SENSOR_SIGNIFICANT_MOTION = 0x12;
static constexpr uint8_t SENSOR_STABILITY_CLASSIFIER = 0x13;
static constexpr uint8_t SENSOR_ACCELEROMETER_RAW = 0x14;
static constexpr uint8_t SENSOR_GYROSCOPE_RAW = 0x15;
static constexpr uint8_t SENSOR_MAGNETOMETER_RAW = 0x16;
static constexpr uint8_t SENSOR_STEP_DETECTOR = 0x18;
static constexpr uint8_t SENSOR_SHAKE_DETECTOR = 0x19;
static constexpr uint8_t SENSOR_FLIP_DETECTOR = 0x1a;
static constexpr uint8_t SENSOR_PICKUP_DETECTOR = 0x1b;
static constexpr uint8_t SENSOR_STABILITY_DETECTOR = 0x1c;
static constexpr uint8_t SENSOR_PERSONAL_ACTIVITY_CLASSIFIER = 0x1e;
static constexpr uint8_t SENSOR_SLEEP_DETECTOR = 0x1f;
static constexpr uint8_t SENSOR_TILT_DETECTOR = 0x20;
static constexpr uint8_t SENSOR_POCKET_DETECTOR = 0x21;
static constexpr uint8_t SENSOR_CIRCLE_DETECTOR = 0x22;
static constexpr uint8_t SENSOR_HEART_RATE_MONITOR = 0x23;
static constexpr uint8_t SENSOR_ARVR_STABILIZED_ROTATION_VECTOR = 0x28;
static constexpr uint8_t SENSOR_ARVR_STABILIZED_GAME_ROTATION_VECTOR= 0x29;
static constexpr uint8_t SENSOR_GYRO_INTEGRATED_ROTATION_VECTOR = 0x2a;
static constexpr uint8_t SENSOR_MOTION_REQUEST = 0x2b;
// FRS Configuration Response: Status/Error field
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_WORDS_RECEIVED = 0x00;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_UNRECOGNIZED_FRS_TYPE = 0x01;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_BUSY = 0x02;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_WRITE_COMPLETED = 0x03;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_WRITE_MODE_READY = 0x04;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_WRITE_FAILED = 0x05;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_UNEXPECTED_DATA = 0x06;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_INVALID_LENGTH = 0x07;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_RECORD_VALID = 0x08;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_RECORD_INVALID = 0x09;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_DEVICE_ERROR__DEPRECATED = 0x0A;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_READ_ONLY_RECORD = 0x0B;
static constexpr uint8_t FRS_WRITE_RESPONSE_STATUS_CANNOT_WRITE_MEMORY_FULL = 0x0C;
};
// Assembles a little-endian 64-bit unsigned integer from 8 raw bytes.
//
// Fix: the previous implementation computed the low 32 bits as
// d[0] | (d[1] << 8) | (d[2] << 16) | (d[3] << 24). Those shifts act on
// bytes promoted to (signed) int, so for d[3] >= 0x80 the 32-bit term
// becomes a negative int which sign-extends when OR-ed with the uint64_t
// terms, setting all of bits 32-63 and corrupting the result. Widening
// every byte to uint64_t before shifting avoids both the sign extension
// and the implementation-defined/UB signed shift into bit 31.
inline uint64_t sh2GetU64(const unsigned char* d) {
    uint64_t value = 0;
    for (int i = 7; i >= 0; --i) {
        value = (value << 8) | static_cast<uint64_t>(d[i]);
    }
    return value;
}
// Assembles a little-endian 32-bit unsigned integer from 4 raw bytes.
//
// Fix: (d[3] << 24) shifts a value promoted to signed int; for
// d[3] >= 0x80 this shifts into the sign bit, which is undefined
// behavior before C++20. Cast each byte to uint32_t before shifting
// so the whole expression is computed in unsigned arithmetic.
inline uint32_t sh2GetU32(const unsigned char* d) {
    return static_cast<uint32_t>(d[0])
         | (static_cast<uint32_t>(d[1]) << 8)
         | (static_cast<uint32_t>(d[2]) << 16)
         | (static_cast<uint32_t>(d[3]) << 24);
}
// Assembles a little-endian 16-bit unsigned integer from 2 raw bytes.
inline uint16_t sh2GetU16(const unsigned char* d) {
    const unsigned int lo = d[0];
    const unsigned int hi = d[1];
    return static_cast<uint16_t>((hi << 8) | lo);
}
// Reads a single raw byte; trivial accessor kept for API symmetry
// with the 16/32/64-bit getters.
inline uint8_t sh2GetU8(const unsigned char* d) {
    return *d;
}
// Interprets a 16-bit word as a signed fixed-point value with q
// fractional bits (SH-2 "Q point") and converts it to double.
inline double sh2ConvertFixedQ16(uint16_t v, unsigned char q) {
    const int16_t asSigned = static_cast<int16_t>(v);
    const double scale = static_cast<double>(1 << q);
    return static_cast<double>(asSigned) / scale;
}
// Returns the total record length in bytes for a given sensor input
// report ID, or -1 when the ID is not a known sensor report.
// Cases are grouped by record size; the ID ranges are non-contiguous
// (0x17 and 0x1d are unassigned), so a switch is used rather than a table.
inline int sh2GetSensorReportLength(unsigned int sensorReportID) {
    switch (sensorReportID) {
        // 5-byte report
        case SH2Constants::SENSOR_TAP_DETECTOR:
            return 5;
        // 6-byte reports: 1D scalar sensors and simple event detectors
        case SH2Constants::SENSOR_HUMIDITY:
        case SH2Constants::SENSOR_PROXIMITY:
        case SH2Constants::SENSOR_TEMPERATURE:
        case SH2Constants::SENSOR_SIGNIFICANT_MOTION:
        case SH2Constants::SENSOR_STABILITY_CLASSIFIER:
        case SH2Constants::SENSOR_SHAKE_DETECTOR:
        case SH2Constants::SENSOR_FLIP_DETECTOR:
        case SH2Constants::SENSOR_PICKUP_DETECTOR:
        case SH2Constants::SENSOR_STABILITY_DETECTOR:
        case SH2Constants::SENSOR_SLEEP_DETECTOR:
        case SH2Constants::SENSOR_TILT_DETECTOR:
        case SH2Constants::SENSOR_POCKET_DETECTOR:
        case SH2Constants::SENSOR_CIRCLE_DETECTOR:
        case SH2Constants::SENSOR_HEART_RATE_MONITOR:
        case SH2Constants::SENSOR_MOTION_REQUEST:
            return 6;
        // 8-byte reports
        case SH2Constants::SENSOR_PRESSURE:
        case SH2Constants::SENSOR_AMBIENT_LIGHT:
        case SH2Constants::SENSOR_STEP_DETECTOR:
            return 8;
        // 10-byte reports: calibrated 3-axis sensors
        case SH2Constants::SENSOR_ACCELEROMETER:
        case SH2Constants::SENSOR_GYROSCOPE:
        case SH2Constants::SENSOR_MAGNETOMETER:
        case SH2Constants::SENSOR_LINEAR_ACCELERATION:
        case SH2Constants::SENSOR_GRAVITY:
            return 10;
        // 12-byte reports
        case SH2Constants::SENSOR_GAME_ROTATION_VECTOR:
        case SH2Constants::SENSOR_STEP_COUNTER:
        case SH2Constants::SENSOR_ARVR_STABILIZED_GAME_ROTATION_VECTOR:
            return 12;
        // 14-byte reports: quaternions with accuracy field
        case SH2Constants::SENSOR_ROTATION_VECTOR:
        case SH2Constants::SENSOR_GEOMAGNETIC_ROTATION:
        case SH2Constants::SENSOR_ARVR_STABILIZED_ROTATION_VECTOR:
        case SH2Constants::SENSOR_GYRO_INTEGRATED_ROTATION_VECTOR:
            return 14;
        // 16-byte reports: uncalibrated/raw 3-axis sensors and PAC
        case SH2Constants::SENSOR_GYROSCOPE_UNCALIBRATED:
        case SH2Constants::SENSOR_MAGNETOMETER_UNCALIBRATED:
        case SH2Constants::SENSOR_ACCELEROMETER_RAW:
        case SH2Constants::SENSOR_GYROSCOPE_RAW:
        case SH2Constants::SENSOR_MAGNETOMETER_RAW:
        case SH2Constants::SENSOR_PERSONAL_ACTIVITY_CLASSIFIER:
            return 16;
        default:
            return -1;
    }
}
// The Q point for fixed-point values, i.e. the base 2 exponent for division
// for consistency reasons also 0 for N/A / undefined
// Returns the fixed-point Q exponent (base-2 fractional bit count) used
// by a sensor report's payload fields; 0 for integer payloads, N/A, and
// unknown IDs alike. Cases are grouped by Q value; all event/counter
// style reports fall through to the default of 0.
inline int sh2GetSensorQPoint(unsigned int sensorReportID) {
    switch (sensorReportID) {
        case SH2Constants::SENSOR_MAGNETOMETER:
        case SH2Constants::SENSOR_MAGNETOMETER_UNCALIBRATED:
        case SH2Constants::SENSOR_PROXIMITY:
            return 4;
        case SH2Constants::SENSOR_TEMPERATURE:
            return 7;
        case SH2Constants::SENSOR_ACCELEROMETER:
        case SH2Constants::SENSOR_LINEAR_ACCELERATION:
        case SH2Constants::SENSOR_GRAVITY:
        case SH2Constants::SENSOR_AMBIENT_LIGHT:
        case SH2Constants::SENSOR_HUMIDITY:
            return 8;
        case SH2Constants::SENSOR_GYROSCOPE:
        case SH2Constants::SENSOR_GYROSCOPE_UNCALIBRATED:
            return 9;
        case SH2Constants::SENSOR_ROTATION_VECTOR: // accuracy fields use Q12 instead
        case SH2Constants::SENSOR_GAME_ROTATION_VECTOR:
        case SH2Constants::SENSOR_GEOMAGNETIC_ROTATION:
        case SH2Constants::SENSOR_ARVR_STABILIZED_ROTATION_VECTOR:
        case SH2Constants::SENSOR_ARVR_STABILIZED_GAME_ROTATION_VECTOR:
        case SH2Constants::SENSOR_GYRO_INTEGRATED_ROTATION_VECTOR: // angular velocity uses Q10 instead
            return 14;
        case SH2Constants::SENSOR_PRESSURE:
            return 20;
        default:
            // Detectors, counters, raw sensors and unknown IDs: integer data (Q0).
            return 0;
    }
}
// Returns a human-readable name for a sensor input report ID, or
// "UNKNOWN" if the ID is not recognized. Implemented as a static
// lookup table with a linear scan; entry count is small enough that
// this is not performance-relevant.
inline const char* sh2GetSensorName(unsigned int sensorReportID) {
    struct IdName { uint8_t id; const char* name; };
    static const IdName sensorNames[] = {
        { SH2Constants::SENSOR_ACCELEROMETER,                        "Accelerometer" },
        { SH2Constants::SENSOR_GYROSCOPE,                            "Gyroscope" },
        { SH2Constants::SENSOR_MAGNETOMETER,                         "Magnetometer" },
        { SH2Constants::SENSOR_LINEAR_ACCELERATION,                  "Linear Acceleration" },
        { SH2Constants::SENSOR_ROTATION_VECTOR,                      "Rotation Vector" },
        { SH2Constants::SENSOR_GRAVITY,                              "Gravity" },
        { SH2Constants::SENSOR_GYROSCOPE_UNCALIBRATED,               "Gyroscope Uncalibrated" },
        { SH2Constants::SENSOR_GAME_ROTATION_VECTOR,                 "Game Rotation Vector" },
        { SH2Constants::SENSOR_GEOMAGNETIC_ROTATION,                 "Geomagnetic Rotation" },
        { SH2Constants::SENSOR_PRESSURE,                             "Pressure" },
        { SH2Constants::SENSOR_AMBIENT_LIGHT,                        "Ambient Light" },
        { SH2Constants::SENSOR_HUMIDITY,                             "Humidity" },
        { SH2Constants::SENSOR_PROXIMITY,                            "Proximity" },
        { SH2Constants::SENSOR_TEMPERATURE,                          "Temperature" },
        { SH2Constants::SENSOR_MAGNETOMETER_UNCALIBRATED,            "Magnetometer Uncalibrated" },
        { SH2Constants::SENSOR_TAP_DETECTOR,                         "Tap Detector" },
        { SH2Constants::SENSOR_STEP_COUNTER,                         "Step Counter" },
        { SH2Constants::SENSOR_SIGNIFICANT_MOTION,                   "Significant Motion" },
        { SH2Constants::SENSOR_STABILITY_CLASSIFIER,                 "Stability Classifier" },
        { SH2Constants::SENSOR_ACCELEROMETER_RAW,                    "Accelerometer Raw" },
        { SH2Constants::SENSOR_GYROSCOPE_RAW,                        "Gyroscope Raw" },
        { SH2Constants::SENSOR_MAGNETOMETER_RAW,                     "Magnetometer Raw" },
        { SH2Constants::SENSOR_STEP_DETECTOR,                        "Step Detector" },
        { SH2Constants::SENSOR_SHAKE_DETECTOR,                       "Shake Detector" },
        { SH2Constants::SENSOR_FLIP_DETECTOR,                        "Flip Detector" },
        { SH2Constants::SENSOR_PICKUP_DETECTOR,                      "Pickup Detector" },
        { SH2Constants::SENSOR_STABILITY_DETECTOR,                   "Stability Detector" },
        { SH2Constants::SENSOR_PERSONAL_ACTIVITY_CLASSIFIER,         "Personal Activity Classifier" },
        { SH2Constants::SENSOR_SLEEP_DETECTOR,                       "Sleep Detector" },
        { SH2Constants::SENSOR_TILT_DETECTOR,                        "Tilt Detector" },
        { SH2Constants::SENSOR_POCKET_DETECTOR,                      "Pocket Detector" },
        { SH2Constants::SENSOR_CIRCLE_DETECTOR,                      "Circle Detector" },
        { SH2Constants::SENSOR_HEART_RATE_MONITOR,                   "Heart Rate Monitor" },
        { SH2Constants::SENSOR_ARVR_STABILIZED_ROTATION_VECTOR,      "ARVR-Stabilized Rotation Vector" },
        { SH2Constants::SENSOR_ARVR_STABILIZED_GAME_ROTATION_VECTOR, "ARVR-Stabilized Game Rotation Vector" },
        { SH2Constants::SENSOR_GYRO_INTEGRATED_ROTATION_VECTOR,      "Gyro-Integrated Rotation Vector" },
        { SH2Constants::SENSOR_MOTION_REQUEST,                       "Motion Request" },
    };
    for (const IdName& entry : sensorNames) {
        if (entry.id == sensorReportID) {
            return entry.name;
        }
    }
    return "UNKNOWN";
}
// Convenience function to return the appropriate unit string, if applicable
// Convenience function: returns the physical measurement unit string for
// scalar/vector sensors, or an empty string when no unit applies
// (detectors, counters, quaternions, unknown IDs).
inline const char* sh2GetSensorUnit(unsigned int sensorReportID) {
    const unsigned int id = sensorReportID;
    if (id == SH2Constants::SENSOR_ACCELEROMETER
            || id == SH2Constants::SENSOR_LINEAR_ACCELERATION
            || id == SH2Constants::SENSOR_GRAVITY) {
        return "m/s²";
    }
    if (id == SH2Constants::SENSOR_GYROSCOPE
            || id == SH2Constants::SENSOR_GYROSCOPE_UNCALIBRATED) {
        return "rad/s";
    }
    if (id == SH2Constants::SENSOR_MAGNETOMETER
            || id == SH2Constants::SENSOR_MAGNETOMETER_UNCALIBRATED) {
        return "μT";
    }
    if (id == SH2Constants::SENSOR_PRESSURE)      return "hPa";
    if (id == SH2Constants::SENSOR_AMBIENT_LIGHT) return "lx"; // some docs quote cd/m²
    if (id == SH2Constants::SENSOR_HUMIDITY)      return "%";
    if (id == SH2Constants::SENSOR_PROXIMITY)     return "cm";
    if (id == SH2Constants::SENSOR_TEMPERATURE)   return "°C";
    return "";
}
// Returns a human-readable name for an SH-2 command ID, or "Unknown"
// for IDs outside the table.
//
// Fix: the bounds check previously was `cmdID < sizeof(cmdNames)`, which
// compares against the array size in BYTES (15 entries * sizeof(char*)),
// not the entry count — command IDs 15..(sizeof-1) read past the end of
// the table. Compare against the element count instead.
inline const char* sh2GetCommandName(unsigned int cmdID) {
    static const char* cmdNames[] = {"Reserved", "Errors", "Counter", "Tare", "Initialize",
        "Reserved", "Save DCD", "ME Calibration", "Reserved", "Periodic DCD Save", "Get Oscillator Type",
        "Clear DCD and Reset", "Calibration", "Bootloader", "Interactive Calibration"};
    const unsigned int numCommands = sizeof(cmdNames) / sizeof(cmdNames[0]);
    if (cmdID < numCommands) return cmdNames[cmdID];
    else return "Unknown";
}
#pragma pack(push,1) // Packed struct definitions from SH-2, co-opted for transfer
// Common prefix for all SH-2 cargos. SHTP headers irrelevant and not represented.
// Header fields common to every cargo; field order and width mirror the
// on-wire layout (struct is packed), so the member order must not change.
class SH2CargoBase {
private:
uint8_t cargoLength[2]; // little-endian 15-bit length; MSB is the subtransfer flag
uint8_t channel;        // SHTP channel number
uint8_t sequenceNumber; // rolling sequence number for this channel
uint8_t reportType;     // SH-2 report ID of the payload
public:
// Cargo length in bytes with the subtransfer flag (bit 15) masked out.
inline uint16_t getCargoLength() const { return sh2GetU16(cargoLength) & 0x7fff; } // mask out subtransfer bit
inline uint8_t getChannel() const { return channel; }
inline uint8_t getSequenceNumber() const { return sequenceNumber; }
inline uint8_t getReportType() const { return reportType; }
};
// Our own custom extension for sending the raw interrupt timestamp (report 0xFF, never reported by SH2)
class SH2CargoBodyScenescanTimestamp {
private:
SH2CargoBase base;        // common cargo header
uint8_t usecSinceEpoch[8]; // 64-bit microsecond count
public:
// Little-endian 64-bit microsecond timestamp taken at interrupt time.
inline uint64_t getUSecSinceEpoch() const { return (uint64_t) sh2GetU64(usecSinceEpoch); }
};
// A Timestamp Rebase (0xFA), reporting additional sensor delay offset since last Timebase
class SH2CargoBodyTimestampRebase {
private:
SH2CargoBase base;    // common cargo header
uint8_t rebaseTime[4]; // signed 32-bit offset, little-endian
public:
// Signed delay offset relative to the last Timebase report.
// NOTE(review): units presumably match the Timebase report's 100 us tick —
// confirm against the SH-2 reference manual.
inline long getRebaseTime() const { return (int32_t) sh2GetU32(rebaseTime); }
};
// A Time Base report with a batch transfer (0xFB)
// It may be followed by any amount of sensor reports, below.
// Refer to the base.getCargoLength() value and the known
// record sizes for parsing them.
class SH2CargoBodyTimeBase {
private:
SH2CargoBase base;           // common cargo header
uint8_t timeBase_100uSec[4]; // delta in 100 us units, little-endian
public:
// Time base converted from 100 us ticks to microseconds.
// NOTE(review): `long` is 32-bit on LLP64 platforms (Windows), where
// 100 * a full 32-bit tick count can overflow — consider int64_t.
inline long getTimeBase() const { return 100l * sh2GetU32(timeBase_100uSec); }
};
// Common base prefix for all sensor reports
class SH2SensorReportBase {
private:
uint8_t sensorID;          // sensor input report ID (see SENSOR_* constants)
uint8_t sequenceNumber;    // per-sensor rolling sequence number
uint8_t statusAndDelayMSB; // bits 1:0 status; bits 7:2 are delay bits 13:8
uint8_t delayLSB;          // delay bits 7:0
public:
// Accuracy/status field (0..3), from the two low bits.
inline unsigned int getStatus() const { return statusAndDelayMSB & 0x03; }
// 14-bit report delay: upper 6 bits come from statusAndDelayMSB[7:2]
// ((x & 0xfc) << 6 == ((x >> 2) & 0x3f) << 8), lower 8 from delayLSB.
inline unsigned int getDelay() const { return ((statusAndDelayMSB & 0xfc) << 6) | delayLSB; }
};
// 10-byte reports with individual Q scaling for non-raw 3D sensors
// 10-byte accelerometer report; axis values are signed Q8 fixed point.
class SH2SensorReportAccelerometer {
private:
SH2SensorReportBase base; // common sensor report prefix
uint8_t xAxis[2];
uint8_t yAxis[2];
uint8_t zAxis[2];
public:
inline double getX() const { return sh2ConvertFixedQ16(sh2GetU16(xAxis), 8); } // Accel Q: shift 8 bits
inline double getY() const { return sh2ConvertFixedQ16(sh2GetU16(yAxis), 8); }
inline double getZ() const { return sh2ConvertFixedQ16(sh2GetU16(zAxis), 8); }
};
// 10-byte magnetometer report; axis values are signed Q4 fixed point.
class SH2SensorReportMagnetometer {
private:
SH2SensorReportBase base; // common sensor report prefix
uint8_t xAxis[2];
uint8_t yAxis[2];
uint8_t zAxis[2];
public:
inline double getX() const { return sh2ConvertFixedQ16(sh2GetU16(xAxis), 4); } // Magn Q: shift 4 bits
inline double getY() const { return sh2ConvertFixedQ16(sh2GetU16(yAxis), 4); }
inline double getZ() const { return sh2ConvertFixedQ16(sh2GetU16(zAxis), 4); }
};
// 10-byte gyroscope report; axis values are signed Q9 fixed point.
class SH2SensorReportGyroscope {
private:
SH2SensorReportBase base; // common sensor report prefix
uint8_t xAxis[2];
uint8_t yAxis[2];
uint8_t zAxis[2];
public:
inline double getX() const { return sh2ConvertFixedQ16(sh2GetU16(xAxis), 9); } // Gyro Q: shift 9 bits
inline double getY() const { return sh2ConvertFixedQ16(sh2GetU16(yAxis), 9); }
inline double getZ() const { return sh2ConvertFixedQ16(sh2GetU16(zAxis), 9); }
};
// 14-byte orientation (quaternion) data. i,j,k,real are also known as x,y,z,w
class SH2SensorReportOrientation {
private:
SH2SensorReportBase base; // common sensor report prefix
uint8_t quatI[2];    // x component
uint8_t quatJ[2];    // y component
uint8_t quatK[2];    // z component
uint8_t quatReal[2]; // w component
uint8_t accuracy[2]; // estimated accuracy
public:
// Quaternion components are signed Q14; accuracy is signed Q12.
// NOTE(review): accuracy is presumably in radians — confirm with the SH-2 manual.
inline double getI() const { return sh2ConvertFixedQ16(sh2GetU16(quatI), 14); } // Quaternion data: shift 14 bits
inline double getJ() const { return sh2ConvertFixedQ16(sh2GetU16(quatJ), 14); }
inline double getK() const { return sh2ConvertFixedQ16(sh2GetU16(quatK), 14); }
inline double getReal() const { return sh2ConvertFixedQ16(sh2GetU16(quatReal), 14); }
inline double getAccuracy() const { return sh2ConvertFixedQ16(sh2GetU16(accuracy), 12); } // Accuracy: shift 12
};
// NOTE(review): dangling/truncated comment — the 6-byte 1D sensor report
// classes (pressure, ambient light, humidity, proximity, temperature)
// are not defined in this header; confirm whether they were intentionally omitted.
// 16-byte data for *raw* accelerometer, gyro, magnetometer
class SH2SensorReportRawAGM {
private:
SH2SensorReportBase base;       // common sensor report prefix
uint8_t xAxisRaw[2];            // raw ADC counts, no Q scaling
uint8_t yAxisRaw[2];
uint8_t zAxisRaw[2];
uint8_t temperature_forGyro[2]; // only populated for the raw gyroscope report
uint8_t timestamp[4];           // sensor-local sample timestamp
// No accessors are provided here; consumers read the packed bytes directly.
};
#pragma pack(pop) // End of common sensor data / transport packed struct definitions
}} // namespaces
#endif

View file

@ -0,0 +1,144 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_RECONSTRUCT3D_OPEN3D_H
#define VISIONTRANSFER_RECONSTRUCT3D_OPEN3D_H
#ifdef OPEN3D_VERSION
namespace visiontransfer {
/*
* Open3d-specific implementations that need to be inlined in order to avoid
* dependencies for projects that do not make use of Open3D
*/
// Converts an ImageSet into an Open3D point cloud. Every pixel becomes a
// point (numPoints = width * height); invalid disparities are whatever
// createPointMap() produced for them. If withColor is set and a left image
// is present, per-point colors are filled from it; otherwise colors_ stays empty.
// Throws std::runtime_error for unsupported left-image pixel formats.
inline std::shared_ptr<open3d::geometry::PointCloud> Reconstruct3D::createOpen3DCloud(
const ImageSet& imageSet, bool withColor, unsigned short minDisparity) {
int numPoints = imageSet.getWidth() * imageSet.getHeight();
std::shared_ptr<open3d::geometry::PointCloud> ret(new open3d::geometry::PointCloud());
// Convert the 3D point cloud
ret->points_.resize(numPoints);
// createPointMap() yields 4 floats per pixel (x, y, z, padding).
float* points = createPointMap(imageSet, minDisparity);
float* end = &points[4*numPoints];
Eigen::Vector3d* dest = &ret->points_[0];
while(points != end) {
float x = *(points++);
float y = *(points++);
float z = *(points++);
points++; // skip the padding component
*dest = Eigen::Vector3d(x, y, z);
dest++;
}
// Convert the color information if enabled
// NOTE(review): the pixel pointer is advanced without consulting
// getRowStride(), i.e. this assumes the left image has no row padding —
// confirm for all capture modes.
if(withColor && imageSet.hasImageType(ImageSet::IMAGE_LEFT)) {
ret->colors_.resize(numPoints);
unsigned char* pixel = imageSet.getPixelData(ImageSet::IMAGE_LEFT);
Eigen::Vector3d* color = &ret->colors_[0];
Eigen::Vector3d* colorEnd = &ret->colors_[numPoints];
switch(imageSet.getPixelFormat(ImageSet::IMAGE_LEFT)) {
case ImageSet::FORMAT_8_BIT_MONO:
// Grey value replicated into r = g = b, normalized to [0, 1].
while(color != colorEnd) {
double col = double(*(pixel++))/0xFF;
*(color++) = Eigen::Vector3d(col, col, col);
}
break;
case ImageSet::FORMAT_12_BIT_MONO:
// 12-bit samples are stored as 16-bit words; normalize by 0xFFF.
while(color != colorEnd) {
double col = double(*reinterpret_cast<unsigned short*>(pixel))/0xFFF;
pixel+=2;
*(color++) = Eigen::Vector3d(col, col, col);
}
break;
case ImageSet::FORMAT_8_BIT_RGB:
while(color != colorEnd) {
double r = double(*(pixel++))/0xFF;
double g = double(*(pixel++))/0xFF;
double b = double(*(pixel++))/0xFF;
*(color++) = Eigen::Vector3d(r, g, b);
}
break;
default: throw std::runtime_error("Illegal pixel format");
}
}
return ret;
}
// Converts an ImageSet into an Open3D RGBDImage: the depth channel is the
// Z map (float metres per pixel), the color channel is the left camera
// image expanded to 3x8-bit RGB. Throws std::runtime_error for
// unsupported left-image pixel formats.
inline std::shared_ptr<open3d::geometry::RGBDImage> Reconstruct3D::createOpen3DImageRGBD(const ImageSet& imageSet,
        unsigned short minDisparity) {
    std::shared_ptr<open3d::geometry::RGBDImage> ret(new open3d::geometry::RGBDImage);
    // Convert depth map: one float (z value) per pixel.
    ret->depth_.width_ = imageSet.getWidth();
    ret->depth_.height_ = imageSet.getHeight();
    ret->depth_.num_of_channels_ = 1;
    ret->depth_.bytes_per_channel_ = sizeof(float);
    ret->depth_.data_.resize(ret->depth_.width_*ret->depth_.height_*ret->depth_.bytes_per_channel_);
    float* zMap = createZMap(imageSet, minDisparity);
    memcpy(&ret->depth_.data_[0], zMap, ret->depth_.data_.size());
    // Convert color to tightly packed 8-bit RGB.
    // NOTE(review): the source pointer is advanced without consulting
    // getRowStride(), i.e. this assumes the left image has no row padding.
    ret->color_.width_ = imageSet.getWidth();
    ret->color_.height_ = imageSet.getHeight();
    ret->color_.num_of_channels_ = 3;
    ret->color_.bytes_per_channel_ = 1;
    ret->color_.data_.resize(ret->color_.width_ * ret->color_.height_ *
        ret->color_.num_of_channels_ * ret->color_.bytes_per_channel_);
    unsigned char* srcPixel = imageSet.getPixelData(ImageSet::IMAGE_LEFT);
    unsigned char* dstPixel = &ret->color_.data_[0];
    unsigned char* dstEnd = &ret->color_.data_[ret->color_.data_.size()];
    switch(imageSet.getPixelFormat(ImageSet::IMAGE_LEFT)) {
        case ImageSet::FORMAT_8_BIT_MONO:
            // Replicate the grey value into all three channels.
            while(dstPixel != dstEnd) {
                *(dstPixel++) = *srcPixel;
                *(dstPixel++) = *srcPixel;
                *(dstPixel++) = *(srcPixel++);
            }
            break;
        case ImageSet::FORMAT_12_BIT_MONO:
            while(dstPixel != dstEnd) {
                unsigned short pixel16Bit = *reinterpret_cast<unsigned short*>(srcPixel);
                // Fix: map the 12-bit range 0..4095 onto 0..255 by dividing by
                // 16 (>> 4). The previous code divided by 0xF (15), which yields
                // values up to 273; those overflow unsigned char and wrap, so
                // the brightest pixels came out nearly black.
                unsigned char pixel8Bit = static_cast<unsigned char>(pixel16Bit >> 4);
                srcPixel += 2;
                *(dstPixel++) = pixel8Bit;
                *(dstPixel++) = pixel8Bit;
                *(dstPixel++) = pixel8Bit;
            }
            break;
        case ImageSet::FORMAT_8_BIT_RGB:
            // Already in the target layout; bulk copy.
            memcpy(&ret->color_.data_[0], srcPixel, ret->color_.data_.size());
            break;
        default: throw std::runtime_error("Illegal pixel format");
    }
    return ret;
}
} // namespace
#endif
#endif

View file

@ -0,0 +1,125 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_RECONSTRUCT3D_PCL_H
#define VISIONTRANSFER_RECONSTRUCT3D_PCL_H
#ifdef PCL_MAJOR_VERSION
namespace visiontransfer {
/*
* PCL-specific implementations that need to be inlined in order to avoid
* dependencies for projects that do not make use of PCL
*/
// Allocates a PCL point cloud of the ImageSet's dimensions and fills in
// the header metadata (frame ID, sequence number, microsecond timestamp).
template <typename T>
typename pcl::PointCloud<T>::Ptr Reconstruct3D::initPointCloud(const ImageSet& imageSet, const char* frameId) {
    int sec = 0, microsec = 0;
    imageSet.getTimestamp(sec, microsec);
    const int width = imageSet.getWidth();
    const int height = imageSet.getHeight();
    typename pcl::PointCloud<T>::Ptr cloud(new pcl::PointCloud<T>(width, height));
    cloud->header.frame_id = frameId;
    cloud->header.seq = imageSet.getSequenceNumber();
    cloud->header.stamp = sec * 1000000LL + microsec; // microseconds since epoch
    cloud->width = width;
    cloud->height = height;
    cloud->is_dense = true;
    return cloud;
}
// Builds a PCL XYZ cloud by bulk-copying the 4-float-per-pixel point map
// (x, y, z, padding) straight into the cloud's point buffer, which uses
// the same 16-byte point layout.
inline pcl::PointCloud<pcl::PointXYZ>::Ptr Reconstruct3D::createXYZCloud(const ImageSet& imageSet,
        const char* frameId, unsigned short minDisparity) {
    float* coords = createPointMap(imageSet, minDisparity);
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = initPointCloud<pcl::PointXYZ>(imageSet, frameId);
    const size_t numFloats = static_cast<size_t>(cloud->width) * cloud->height * 4;
    memcpy(&cloud->points[0].x, coords, numFloats * sizeof(float));
    return cloud;
}
// Builds a PCL XYZI cloud: coordinates come from the point map, per-point
// intensity from the left greyscale image normalized to [0, 1].
// Only 8-bit and 12-bit mono left images are supported; anything else throws.
inline pcl::PointCloud<pcl::PointXYZI>::Ptr Reconstruct3D::createXYZICloud(const ImageSet& imageSet,
const char* frameId, unsigned short minDisparity) {
float* pointMap = createPointMap(imageSet, minDisparity);
pcl::PointCloud<pcl::PointXYZI>::Ptr ret = initPointCloud<pcl::PointXYZI>(imageSet, frameId);
pcl::PointXYZI* dstPtr = &ret->points[0];
if(imageSet.getPixelFormat(0) == ImageSet::FORMAT_8_BIT_MONO) {
// 8-bit greyscale: normalize by 255.
for(int y = 0; y < imageSet.getHeight(); y++) {
unsigned char* rowPtr = imageSet.getPixelData(0) + y*imageSet.getRowStride(0);
unsigned char* endPtr = rowPtr + imageSet.getWidth();
for(; rowPtr < endPtr; rowPtr++) {
dstPtr->intensity = static_cast<float>(*rowPtr)/255.0F;
dstPtr->x = *pointMap++;
dstPtr->y = *pointMap++;
dstPtr->z = *pointMap++;
pointMap++; // skip the padding component of the 4-float point record
dstPtr++;
}
}
} else if(imageSet.getPixelFormat(0) == ImageSet::FORMAT_12_BIT_MONO) {
// 12-bit greyscale stored as 16-bit words: normalize by 4095.
for(int y = 0; y < imageSet.getHeight(); y++) {
unsigned short* rowPtr = reinterpret_cast<unsigned short*>(imageSet.getPixelData(0) + y*imageSet.getRowStride(0));
unsigned short* endPtr = rowPtr + imageSet.getWidth();
for(; rowPtr < endPtr; rowPtr++) {
dstPtr->intensity = static_cast<float>(*rowPtr)/4095.0F;
dstPtr->x = *pointMap++;
dstPtr->y = *pointMap++;
dstPtr->z = *pointMap++;
pointMap++; // skip padding
dstPtr++;
}
}
} else {
throw std::runtime_error("Left image does not have a valid greyscale format");
}
return ret;
}
// Builds a PCL XYZRGB cloud: coordinates come from the point map and
// per-point color from the left image, which must be in 8-bit RGB format
// (otherwise std::runtime_error is thrown).
inline pcl::PointCloud<pcl::PointXYZRGB>::Ptr Reconstruct3D::createXYZRGBCloud(const ImageSet& imageSet,
        const char* frameId, unsigned short minDisparity) {
    float* coords = createPointMap(imageSet, minDisparity);
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud = initPointCloud<pcl::PointXYZRGB>(imageSet, frameId);
    pcl::PointXYZRGB* outPoint = &cloud->points[0];
    if(imageSet.getPixelFormat(0) != ImageSet::FORMAT_8_BIT_RGB) {
        throw std::runtime_error("Left image is not an RGB image");
    }
    const int height = imageSet.getHeight();
    const int width = imageSet.getWidth();
    for(int row = 0; row < height; row++) {
        unsigned char* pixel = imageSet.getPixelData(0) + row*imageSet.getRowStride(0);
        for(int col = 0; col < width; col++, pixel += 3) {
            outPoint->r = pixel[0];
            outPoint->g = pixel[1];
            outPoint->b = pixel[2];
            outPoint->x = *coords++;
            outPoint->y = *coords++;
            outPoint->z = *coords++;
            coords++; // skip the padding component of the 4-float point record
            outPoint++;
        }
    }
    return cloud;
}
} // namespace
#endif
#endif

View file

@ -0,0 +1,556 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include "reconstruct3d.h"
#include "visiontransfer/alignedallocator.h"
#include <vector>
#include <cstring>
#include <algorithm>
#include <fstream>
#include <stdexcept>
#include <cmath>
// SIMD Headers
#ifdef __AVX2__
#include <immintrin.h>
#elif __SSE2__
#include <emmintrin.h>
#endif
using namespace std;
using namespace visiontransfer;
using namespace visiontransfer::internal;
namespace visiontransfer {
/*************** Pimpl class containing all private members ***********/
// Private implementation for Reconstruct3D (pimpl idiom). The public
// methods forward here; the buffer returned by the create* methods is
// owned by this object and reused across calls.
class Reconstruct3D::Pimpl {
public:
Pimpl();
float* createPointMap(const unsigned short* dispMap, int width, int height,
int rowStride, const float* q, unsigned short minDisparity, int subpixelFactor,
unsigned short maxDisparity);
float* createPointMap(const ImageSet& imageSet, unsigned short minDisparity);
float* createZMap(const ImageSet& imageSet, unsigned short minDisparity, unsigned short maxDisparity);
void projectSinglePoint(int imageX, int imageY, unsigned short disparity, const float* q,
float& pointX, float& pointY, float& pointZ, int subpixelFactor);
void writePlyFile(const char* file, const unsigned short* dispMap,
const unsigned char* image, int width, int height, ImageSet::ImageFormat format,
int dispRowStride, int imageRowStride, const float* q,
double maxZ, bool binary, int subpixelFactor, unsigned short maxDisparity);
void writePlyFile(const char* file, const ImageSet& imageSet,
double maxZ, bool binary);
private:
// Reused output buffer (16-byte-aligned for the SIMD code paths);
// holds 4 floats (x, y, z, padding) per pixel for point maps,
// or 1 float per pixel for z maps.
std::vector<float, AlignedAllocator<float> > pointMap;
// Scalar reference implementation, used when SIMD prerequisites fail.
float* createPointMapFallback(const unsigned short* dispMap, int width, int height,
int rowStride, const float* q, unsigned short minDisparity, int subpixelFactor,
unsigned short maxDisparity);
// SSE2-vectorized variant (width % 8 == 0, 16-byte-aligned input).
float* createPointMapSSE2(const unsigned short* dispMap, int width, int height,
int rowStride, const float* q, unsigned short minDisparity, int subpixelFactor,
unsigned short maxDisparity);
// AVX2-vectorized variant (width % 16 == 0, 32-byte-aligned input).
float* createPointMapAVX2(const unsigned short* dispMap, int width, int height,
int rowStride, const float* q, unsigned short minDisparity, int subpixelFactor,
unsigned short maxDisparity);
};
/******************** Stubs for all public members ********************/
// Public API stubs: each method simply forwards to the pimpl object.
Reconstruct3D::Reconstruct3D()
:pimpl(new Pimpl) {
}
// NOTE(review): raw owning pointer; the class must be non-copyable (or
// pimpl held in a unique_ptr) to avoid double deletion — confirm in header.
Reconstruct3D::~Reconstruct3D() {
delete pimpl;
}
float* Reconstruct3D::createPointMap(const unsigned short* dispMap, int width, int height,
int rowStride, const float* q, unsigned short minDisparity, int subpixelFactor,
unsigned short maxDisparity) {
return pimpl->createPointMap(dispMap, width, height, rowStride, q, minDisparity,
subpixelFactor, maxDisparity);
}
float* Reconstruct3D::createPointMap(const ImageSet& imageSet, unsigned short minDisparity) {
return pimpl->createPointMap(imageSet, minDisparity);
}
float* Reconstruct3D::createZMap(const ImageSet& imageSet, unsigned short minDisparity,
unsigned short maxDisparity) {
return pimpl->createZMap(imageSet, minDisparity, maxDisparity);
}
void Reconstruct3D::projectSinglePoint(int imageX, int imageY, unsigned short disparity,
const float* q, float& pointX, float& pointY, float& pointZ, int subpixelFactor) {
pimpl->projectSinglePoint(imageX, imageY, disparity, q, pointX, pointY, pointZ,
subpixelFactor);
}
void Reconstruct3D::writePlyFile(const char* file, const unsigned short* dispMap,
const unsigned char* image, int width, int height, ImageSet::ImageFormat format, int dispRowStride,
int imageRowStride, const float* q, double maxZ, bool binary, int subpixelFactor,
unsigned short maxDisparity) {
pimpl->writePlyFile(file, dispMap, image, width, height, format, dispRowStride,
imageRowStride, q, maxZ, binary, subpixelFactor, maxDisparity);
}
void Reconstruct3D::writePlyFile(const char* file, const ImageSet& imageSet,
double maxZ, bool binary) {
pimpl->writePlyFile(file, imageSet, maxZ, binary);
}
/******************** Implementation in pimpl class *******************/
// Nothing to initialize: the pointMap buffer grows lazily on first use.
Reconstruct3D::Pimpl::Pimpl() {
}
// Computes a 4-float-per-pixel (x, y, z, padding) point map from a raw
// disparity map, dispatching to the fastest compiled-in SIMD variant
// whose width/alignment prerequisites the input satisfies.
// The returned pointer refers to the internal reusable buffer and stays
// valid until the next create* call on this instance.
float* Reconstruct3D::Pimpl::createPointMap(const unsigned short* dispMap, int width,
int height, int rowStride, const float* q, unsigned short minDisparity,
int subpixelFactor, unsigned short maxDisparity) {
// Allocate the buffer
if(pointMap.size() < static_cast<unsigned int>(4*width*height)) {
pointMap.resize(4*width*height);
}
// AVX2 path needs width divisible by 16 and 32-byte-aligned input.
# ifdef __AVX2__
if(width % 16 == 0 && (uintptr_t)dispMap % 32 == 0) {
return createPointMapAVX2(dispMap, width, height, rowStride, q,
minDisparity, subpixelFactor, maxDisparity);
} else
# endif
// SSE2 path needs width divisible by 8 and 16-byte-aligned input.
# ifdef __SSE2__
if(width % 8 == 0 && (uintptr_t)dispMap % 16 == 0) {
return createPointMapSSE2(dispMap, width, height, rowStride, q,
minDisparity, subpixelFactor, maxDisparity);
} else
# endif
// Scalar fallback handles every remaining case.
return createPointMapFallback(dispMap, width, height, rowStride, q,
minDisparity, subpixelFactor, maxDisparity);
}
// Convenience overload: validates that the ImageSet carries a 12-bit
// disparity channel, then delegates to the raw-buffer overload with the
// image set's geometry, Q matrix and subpixel factor (max disparity is
// the 12-bit ceiling 0xFFF).
float* Reconstruct3D::Pimpl::createPointMap(const ImageSet& imageSet, unsigned short minDisparity) {
if(!imageSet.hasImageType(ImageSet::IMAGE_DISPARITY)) {
throw std::runtime_error("ImageSet does not contain a disparity map!");
}
if(imageSet.getPixelFormat(ImageSet::IMAGE_DISPARITY) != ImageSet::FORMAT_12_BIT_MONO) {
throw std::runtime_error("Disparity map must have 12-bit pixel format!");
}
return createPointMap(reinterpret_cast<unsigned short*>(imageSet.getPixelData(ImageSet::IMAGE_DISPARITY)), imageSet.getWidth(),
imageSet.getHeight(), imageSet.getRowStride(ImageSet::IMAGE_DISPARITY), imageSet.getQMatrix(), minDisparity,
imageSet.getSubpixelFactor(), 0xFFF);
}
// Scalar reprojection: for each pixel, [x' y' z' w']^T = Q * [u v d 1]^T
// and the output point is (x'/w', y'/w', z'/w'). Instead of a full 4x4
// multiply per pixel, the row-dependent terms qx..qw are seeded per row
// and the column-dependent contributions (Q columns 0 and 2) are added
// incrementally inside the loop.
float* Reconstruct3D::Pimpl::createPointMapFallback(const unsigned short* dispMap, int width,
int height, int rowStride, const float* q, unsigned short minDisparity,
int subpixelFactor, unsigned short maxDisparity) {
// Code without SSE or AVX optimization
float* outputPtr = &pointMap[0];
// rowStride is in bytes; the disparity map is 16-bit, hence /2.
int stride = rowStride / 2;
for(int y = 0; y < height; y++) {
// Per-row accumulators: Q columns 1 (times y) and 3.
double qx = q[1]*y + q[3];
double qy = q[5]*y + q[7];
double qz = q[9]*y + q[11];
double qw = q[13]*y + q[15];
const unsigned short* dispRow = &dispMap[y*stride];
for(int x = 0; x < width; x++) {
// Clamp below to minDisparity; disparities at/above maxDisparity
// are invalid markers and are mapped to minDisparity as well.
unsigned short intDisp = std::max(minDisparity, dispRow[x]);
if(intDisp >= maxDisparity) {
intDisp = minDisparity; // Invalid disparity
}
// Disparity values are stored pre-multiplied by subpixelFactor.
double d = intDisp / double(subpixelFactor);
double w = qw + q[14]*d;
*outputPtr = static_cast<float>((qx + q[2]*d)/w); // x
outputPtr++;
*outputPtr = static_cast<float>((qy + q[6]*d)/w); // y
outputPtr++;
*outputPtr = static_cast<float>((qz + q[10]*d)/w); // z
outputPtr+=2; // Consider padding
// Advance by one column: add Q column 0.
qx += q[0];
qy += q[4];
qz += q[8];
qw += q[12];
}
}
return &pointMap[0];
}
// Computes a depth-only map (one float per pixel, in meters) from the
// disparity channel. Analogous to createPointMapFallback(), but restricted
// to the z and w rows of the Q matrix.
// The returned pointer aliases the shared pointMap buffer and stays valid
// until the next createPointMap()/createZMap()/writePlyFile() call.
float* Reconstruct3D::Pimpl::createZMap(const ImageSet& imageSet, unsigned short minDisparity,
        unsigned short maxDisparity) {
    // Allocate the buffer
    if(pointMap.size() < static_cast<unsigned int>(imageSet.getWidth()*imageSet.getHeight())) {
        pointMap.resize(imageSet.getWidth()*imageSet.getHeight());
    }
    float* outputPtr = &pointMap[0];
    int stride = imageSet.getRowStride(ImageSet::IMAGE_DISPARITY) / 2; // stride in 16-bit elements
    const unsigned short* dispMap = reinterpret_cast<const unsigned short*>(imageSet.getPixelData(ImageSet::IMAGE_DISPARITY));
    int subpixelFactor = imageSet.getSubpixelFactor();
    const float* q = imageSet.getQMatrix();
    for(int y = 0; y < imageSet.getHeight(); y++) {
        // Row-constant parts of the z and w components of Q * (x, y, d, 1)^T
        double qz = q[9]*y + q[11];
        double qw = q[13]*y + q[15];
        const unsigned short* dispRow = &dispMap[y*stride];
        for(int x = 0; x < imageSet.getWidth(); x++) {
            unsigned short intDisp = std::max(minDisparity, dispRow[x]);
            if(intDisp >= maxDisparity) {
                intDisp = minDisparity; // Invalid disparity
            }
            double d = intDisp / double(subpixelFactor);
            double w = qw + q[14]*d; // homogeneous scale
            *outputPtr = static_cast<float>((qz + q[10]*d)/w); // z
            outputPtr++;
            qz += q[8];
            // Bug fix: also advance the w accumulator by its x coefficient,
            // matching createPointMapFallback(). For the usual rectified-stereo
            // Q matrix q[12] is 0, so results are unchanged in that case.
            qw += q[12];
        }
    }
    return &pointMap[0];
}
// Projects one pixel (imageX, imageY, disparity) to a 3D point using the
// disparity-to-depth matrix Q; results are written to the output references.
// NOTE(review): only q[0], q[3], q[5], q[7], q[11], q[14] and q[15] are read,
// i.e. the remaining entries of Q are assumed to be zero (the standard
// rectified-stereo layout) -- confirm for custom calibrations.
void Reconstruct3D::Pimpl::projectSinglePoint(int imageX, int imageY, unsigned short disparity,
        const float* q, float& pointX, float& pointY, float& pointZ, int subpixelFactor) {
    const double trueDisp = disparity / double(subpixelFactor);
    const double homW = q[15] + q[14]*trueDisp; // homogeneous scale
    pointX = static_cast<float>((imageX*q[0] + q[3])/homW);
    pointY = static_cast<float>((imageY*q[5] + q[7])/homW);
    pointZ = static_cast<float>(q[11]/homW);
}
# ifdef __AVX2__
// AVX2 implementation: processes 16 disparity pixels per 256-bit load.
// Preconditions (enforced by the dispatching createPointMap()): width is a
// multiple of 16 and the disparity buffer is 32-byte aligned.
// NOTE(review): rows are read with aligned loads at y*rowStride byte offsets,
// which presumably requires rowStride to be a multiple of 32 as well --
// confirm with the callers' buffer allocation.
float* Reconstruct3D::Pimpl::createPointMapAVX2(const unsigned short* dispMap, int width,
        int height, int rowStride, const float* q, unsigned short minDisparity,
        int subpixelFactor, unsigned short maxDisparity) {
    // Create column vectors of q, duplicated in both 128-bit lanes so that
    // one 256-bit multiply handles two points at once
    const __m256 qCol0 = _mm256_setr_ps(q[0], q[4], q[8], q[12], q[0], q[4], q[8], q[12]);
    const __m256 qCol1 = _mm256_setr_ps(q[1], q[5], q[9], q[13], q[1], q[5], q[9], q[13]);
    const __m256 qCol2 = _mm256_setr_ps(q[2], q[6], q[10], q[14], q[2], q[6], q[10], q[14]);
    const __m256 qCol3 = _mm256_setr_ps(q[3], q[7], q[11], q[15], q[3], q[7], q[11], q[15]);
    // More constants that we need
    const __m256i minDispVector = _mm256_set1_epi16(minDisparity);
    const __m256i maxDispVector = _mm256_set1_epi16(maxDisparity);
    const __m256 scaleVector = _mm256_set1_ps(1.0/double(subpixelFactor));
    const __m256i zeroVector = _mm256_set1_epi16(0);
    float* outputPtr = &pointMap[0];
    for(int y = 0; y < height; y++) {
        const unsigned char* rowStart = &reinterpret_cast<const unsigned char*>(dispMap)[y*rowStride];
        const unsigned char* rowEnd = &reinterpret_cast<const unsigned char*>(dispMap)[y*rowStride + 2*width];
        int x = 0;
        for(const unsigned char* ptr = rowStart; ptr != rowEnd; ptr += 32) {
            __m256i disparities = _mm256_load_si256(reinterpret_cast<const __m256i*>(ptr));
            // Find invalid disparities (>= maxDisparity) and set them to 0
            __m256i validMask = _mm256_cmpgt_epi16(maxDispVector, disparities);
            disparities = _mm256_and_si256(validMask, disparities);
            // Clamp to minimum disparity
            disparities = _mm256_max_epi16(disparities, minDispVector);
            // Stupid AVX2 unpack mixes everything up! Lets swap the register beforehand.
            __m256i disparitiesMixup = _mm256_permute4x64_epi64(disparities, 0xd8);
            // Convert to floats and scale with 1/subpixelFactor
            __m256 floatDisp = _mm256_cvtepi32_ps(_mm256_unpacklo_epi16(disparitiesMixup, zeroVector));
            __m256 dispScaled = _mm256_mul_ps(floatDisp, scaleVector);
            // Copy to a 32-byte aligned scratch array
#ifdef _MSC_VER
            __declspec(align(32)) float dispArray[16];
#else
            float dispArray[16]__attribute__((aligned(32)));
#endif
            _mm256_store_ps(&dispArray[0], dispScaled);
            // Same for other half
            floatDisp = _mm256_cvtepi32_ps(_mm256_unpackhi_epi16(disparitiesMixup, zeroVector));
            dispScaled = _mm256_mul_ps(floatDisp, scaleVector);
            _mm256_store_ps(&dispArray[8], dispScaled);
            // Iterate over disparities and perform matrix multiplication for each
            for(int i=0; i<16; i+=2) {
                // Create two homogeneous vectors (x, y, d, 1) and (x+1, y, d', 1)
                __m256 vec = _mm256_setr_ps(x, y, dispArray[i], 1.0,
                    x+1, y, dispArray[i+1], 1.0);
                // Multiply with matrix: broadcast each component, scale columns
                __m256 u1 = _mm256_shuffle_ps(vec,vec, _MM_SHUFFLE(0,0,0,0));
                __m256 u2 = _mm256_shuffle_ps(vec,vec, _MM_SHUFFLE(1,1,1,1));
                __m256 u3 = _mm256_shuffle_ps(vec,vec, _MM_SHUFFLE(2,2,2,2));
                __m256 u4 = _mm256_shuffle_ps(vec,vec, _MM_SHUFFLE(3,3,3,3));
                __m256 prod1 = _mm256_mul_ps(u1, qCol0);
                __m256 prod2 = _mm256_mul_ps(u2, qCol1);
                __m256 prod3 = _mm256_mul_ps(u3, qCol2);
                __m256 prod4 = _mm256_mul_ps(u4, qCol3);
                __m256 multResult = _mm256_add_ps(_mm256_add_ps(prod1, prod2), _mm256_add_ps(prod3, prod4));
                // Divide by w to receive point coordinates
                __m256 point = _mm256_div_ps(multResult,
                    _mm256_shuffle_ps(multResult,multResult, _MM_SHUFFLE(3,3,3,3)));
                // Write result to memory (two points, 4 floats each)
                _mm256_store_ps(outputPtr, point);
                outputPtr += 8;
                x+=2;
            }
        }
    }
    return &pointMap[0];
}
#endif
#ifdef __SSE2__
// SSE2 implementation: processes 8 disparity pixels per 128-bit load.
// Preconditions (enforced by the dispatching createPointMap()): width is a
// multiple of 8 and the disparity buffer is 16-byte aligned.
// NOTE(review): rows are read with aligned loads at y*rowStride byte offsets,
// which presumably requires rowStride to be a multiple of 16 as well --
// confirm with the callers' buffer allocation.
float* Reconstruct3D::Pimpl::createPointMapSSE2(const unsigned short* dispMap, int width,
        int height, int rowStride, const float* q, unsigned short minDisparity,
        int subpixelFactor, unsigned short maxDisparity) {
    // Create column vectors of q
    const __m128 qCol0 = _mm_setr_ps(q[0], q[4], q[8], q[12]);
    const __m128 qCol1 = _mm_setr_ps(q[1], q[5], q[9], q[13]);
    const __m128 qCol2 = _mm_setr_ps(q[2], q[6], q[10], q[14]);
    const __m128 qCol3 = _mm_setr_ps(q[3], q[7], q[11], q[15]);
    // More constants that we need
    const __m128i minDispVector = _mm_set1_epi16(minDisparity);
    const __m128i maxDispVector = _mm_set1_epi16(maxDisparity);
    const __m128 scaleVector = _mm_set1_ps(1.0/double(subpixelFactor));
    const __m128i zeroVector = _mm_set1_epi16(0);
    float* outputPtr = &pointMap[0];
    for(int y = 0; y < height; y++) {
        const unsigned char* rowStart = &reinterpret_cast<const unsigned char*>(dispMap)[y*rowStride];
        const unsigned char* rowEnd = &reinterpret_cast<const unsigned char*>(dispMap)[y*rowStride + 2*width];
        int x = 0;
        for(const unsigned char* ptr = rowStart; ptr != rowEnd; ptr += 16) {
            __m128i disparities = _mm_load_si128(reinterpret_cast<const __m128i*>(ptr));
            // Find invalid disparities (>= maxDisparity) and set them to 0
            __m128i validMask = _mm_cmplt_epi16(disparities, maxDispVector);
            disparities = _mm_and_si128(validMask, disparities);
            // Clamp to minimum disparity
            disparities = _mm_max_epi16(disparities, minDispVector);
            // Convert to floats and scale with 1/subpixelFactor
            __m128 floatDisp = _mm_cvtepi32_ps(_mm_unpacklo_epi16(disparities, zeroVector));
            __m128 dispScaled = _mm_mul_ps(floatDisp, scaleVector);
            // Copy to a 16-byte aligned scratch array
#ifdef _MSC_VER
            __declspec(align(16)) float dispArray[8];
#else
            float dispArray[8]__attribute__((aligned(16)));
#endif
            _mm_store_ps(&dispArray[0], dispScaled);
            // Same for other half
            floatDisp = _mm_cvtepi32_ps(_mm_unpackhi_epi16(disparities, zeroVector));
            dispScaled = _mm_mul_ps(floatDisp, scaleVector);
            _mm_store_ps(&dispArray[4], dispScaled);
            // Iterate over disparities and perform matrix multiplication for each
            for(int i=0; i<8; i++) {
                // Create homogeneous vector (x, y, d, 1)
                __m128 vec = _mm_setr_ps(static_cast<float>(x), static_cast<float>(y), dispArray[i], 1.0);
                // Multiply with matrix: broadcast each component, scale columns
                __m128 u1 = _mm_shuffle_ps(vec,vec, _MM_SHUFFLE(0,0,0,0));
                __m128 u2 = _mm_shuffle_ps(vec,vec, _MM_SHUFFLE(1,1,1,1));
                __m128 u3 = _mm_shuffle_ps(vec,vec, _MM_SHUFFLE(2,2,2,2));
                __m128 u4 = _mm_shuffle_ps(vec,vec, _MM_SHUFFLE(3,3,3,3));
                __m128 prod1 = _mm_mul_ps(u1, qCol0);
                __m128 prod2 = _mm_mul_ps(u2, qCol1);
                __m128 prod3 = _mm_mul_ps(u3, qCol2);
                __m128 prod4 = _mm_mul_ps(u4, qCol3);
                __m128 multResult = _mm_add_ps(_mm_add_ps(prod1, prod2), _mm_add_ps(prod3, prod4));
                // Divide by w to receive point coordinates
                __m128 point = _mm_div_ps(multResult,
                    _mm_shuffle_ps(multResult,multResult, _MM_SHUFFLE(3,3,3,3)));
                // Write result to memory (one point, 4 floats)
                _mm_store_ps(outputPtr, point);
                outputPtr += 4;
                x++;
            }
        }
    }
    return &pointMap[0];
}
#endif
// Exports the disparity map as a PLY point cloud, optionally colored from the
// camera image. Points are reconstructed via createPointMap() with
// minDisparity = 0, so invalid pixels get z = +inf and are dropped whenever
// maxZ >= 0 (they fail the z <= maxZ test).
// NOTE(review): in binary mode non-finite coordinates are written verbatim,
// while ASCII mode substitutes "NaN NaN NaN" -- confirm downstream readers
// accept both.
void Reconstruct3D::Pimpl::writePlyFile(const char* file, const unsigned short* dispMap,
        const unsigned char* image, int width, int height, ImageSet::ImageFormat format, int dispRowStride,
        int imageRowStride, const float* q, double maxZ, bool binary, int subpixelFactor,
        unsigned short maxDisparity) {
    // Reconstruct all points (this local shadows the member buffer it points into)
    float* pointMap = createPointMap(dispMap, width, height, dispRowStride,
        q, 0, subpixelFactor, maxDisparity);
    // Count number of valid points
    int pointsCount = 0;
    if(maxZ >= 0) {
        for(int i=0; i<width*height; i++) {
            if(pointMap[4*i+2] <= maxZ) {
                pointsCount++;
            }
        }
    } else {
        // No z limit: every pixel is exported
        pointsCount = width*height;
    }
    // Write file header
    fstream strm(file, binary ? (ios::out | ios::binary) : ios::out);
    strm << "ply" << endl;
    if(binary) {
        strm << "format binary_little_endian 1.0" << endl;
    } else {
        strm << "format ascii 1.0" << endl;
    }
    strm << "element vertex " << pointsCount << endl
        << "property float x" << endl
        << "property float y" << endl
        << "property float z" << endl;
    if (image != nullptr) {
        // include RGB information only if a camera image was provided
        strm << "property uchar red" << endl
            << "property uchar green" << endl
            << "property uchar blue" << endl;
    }
    strm << "end_header" << endl;
    // Write points
    for(int i=0; i<width*height; i++) {
        int y = i / width;
        int x = i % width;
        if(maxZ < 0 || pointMap[4*i+2] <= maxZ) {
            if(binary) {
                // Write binary format: x, y, z as little-endian floats
                strm.write(reinterpret_cast<char*>(&pointMap[4*i]), sizeof(float)*3);
                if (image == nullptr) {
                    // disparity only, no image data
                } else if(format == ImageSet::FORMAT_8_BIT_RGB) {
                    const unsigned char* col = &image[y*imageRowStride + 3*x];
                    strm.write(reinterpret_cast<const char*>(col), 3*sizeof(*col));
                } else if(format == ImageSet::FORMAT_8_BIT_MONO) {
                    // replicate the gray value into r, g and b
                    const unsigned char* col = &image[y*imageRowStride + x];
                    unsigned char writeData[3] = {*col, *col, *col};
                    strm.write(reinterpret_cast<const char*>(writeData), sizeof(writeData));
                } else if(format == ImageSet::FORMAT_12_BIT_MONO) {
                    // scale 12-bit gray down to 8 bit and replicate
                    const unsigned short* col = reinterpret_cast<const unsigned short*>(&image[y*imageRowStride + 2*x]);
                    unsigned char writeData[3] = {
                        (unsigned char)(*col >> 4),
                        (unsigned char)(*col >> 4),
                        (unsigned char)(*col >> 4)
                    };
                    strm.write(reinterpret_cast<const char*>(writeData), sizeof(writeData));
                }
            } else {
                // Write ASCII format
                if(std::isfinite(pointMap[4*i + 2])) {
                    strm << pointMap[4*i]
                        << " " << pointMap[4*i + 1]
                        << " " << pointMap[4*i + 2];
                } else {
                    // non-finite point (e.g. disparity 0 with minDisparity 0)
                    strm << "NaN NaN NaN";
                }
                if (image == nullptr) {
                    // disparity only, no image data
                    strm << endl;
                } else if(format == ImageSet::FORMAT_8_BIT_RGB) {
                    const unsigned char* col = &image[y*imageRowStride + 3*x];
                    strm << " " << static_cast<int>(col[0])
                        << " " << static_cast<int>(col[1])
                        << " " << static_cast<int>(col[2]) << endl;
                } else if(format == ImageSet::FORMAT_8_BIT_MONO) {
                    const unsigned char* col = &image[y*imageRowStride + x];
                    strm << " " << static_cast<int>(*col)
                        << " " << static_cast<int>(*col)
                        << " " << static_cast<int>(*col) << endl;
                } else if(format == ImageSet::FORMAT_12_BIT_MONO) {
                    // scale 12-bit gray down to 8 bit and replicate
                    const unsigned short* col = reinterpret_cast<const unsigned short*>(&image[y*imageRowStride + 2*x]);
                    strm << " " << static_cast<int>(*col >> 4)
                        << " " << static_cast<int>(*col >> 4)
                        << " " << static_cast<int>(*col >> 4) << endl;
                }
            }
        }
    }
}
void Reconstruct3D::Pimpl::writePlyFile(const char* file, const ImageSet& imageSet,
double maxZ, bool binary) {
int indexDisp = imageSet.getIndexOf(ImageSet::IMAGE_DISPARITY);
int indexImg = imageSet.getIndexOf(ImageSet::IMAGE_LEFT);
if(indexDisp == -1) {
throw std::runtime_error("No disparity channel present, cannot create point map!");
}
if(imageSet.getPixelFormat(ImageSet::IMAGE_DISPARITY) != ImageSet::FORMAT_12_BIT_MONO) {
throw std::runtime_error("Disparity map must have 12-bit pixel format!");
}
// write Ply file, passing image data for point colors, if available
writePlyFile(file, reinterpret_cast<unsigned short*>(imageSet.getPixelData(indexDisp)),
(indexImg == -1) ? nullptr : imageSet.getPixelData(indexImg),
imageSet.getWidth(), imageSet.getHeight(),
(indexImg == -1) ? ImageSet::FORMAT_8_BIT_MONO : imageSet.getPixelFormat(indexImg),
imageSet.getRowStride(indexDisp),
(indexImg == -1) ? 0 : imageSet.getRowStride(indexImg),
imageSet.getQMatrix(),
maxZ, binary, imageSet.getSubpixelFactor(), 0xFFF);
}
} // namespace

View file

@ -0,0 +1,292 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_RECONSTRUCT3D_H
#define VISIONTRANSFER_RECONSTRUCT3D_H
#include <limits>
#include <stdexcept>
#include "visiontransfer/common.h"
#include "visiontransfer/imageset.h"
#ifdef OPEN3D_VERSION
# include <memory>
#endif
namespace visiontransfer {
/**
* \brief Transforms a disparity map into a set of 3D points.
*
* Use this class for reconstructing the 3D location for each valid
* point in a disparity map.
*/
class VT_EXPORT Reconstruct3D {
public:
    /**
     * \brief Constructs a new object for 3D reconstruction.
     */
    Reconstruct3D();
    ~Reconstruct3D();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
    /**
     * \brief Reconstructs the 3D location of each pixel in the given
     * disparity map.
     *
     * \param dispMap Pointer to the data of the disparity map. The disparity map is
     *        assumed to have a N-bit subpixel resolution. This means that each
     *        value needs to be divided by the subpixel factor to receive the true disparity.
     * \param width Width of the disparity map.
     * \param height Height of the disparity map.
     * \param rowStride Row stride (i.e. distance between two rows in bytes) of the
     *        disparity map.
     * \param q Disparity-to-depth mapping matrix of size 4x4. The matrix is
     *        stored in a row-wise alignment. Obtain this matrix from your
     *        camera calibration data.
     * \param minDisparity The minimum disparity, again with N-bit subpixel
     *        resolution. Lower disparities will be clamped to this value before
     *        computing the 3D location.
     * \param subpixelFactor Subpixel division factor for disparity value.
     * \param maxDisparity The maximum value that occurs in the disparity map. Any value
     *        greater or equal will be marked as invalid.
     *
     * This method is deprecated in favor of createPointMap(const ImageSet&, unsigned short).
     */
    DEPRECATED("Use createPointMap(const ImageSet&, ...) instead.")
    float* createPointMap(const unsigned short* dispMap, int width, int height,
        int rowStride, const float* q, unsigned short minDisparity = 1,
        int subpixelFactor = 16, unsigned short maxDisparity = 0xFFF);
#endif
    /**
     * \brief Reconstructs the 3D location of each pixel in the given
     * disparity map.
     *
     * \param imageSet Image set containing the disparity map.
     * \param minDisparity The minimum disparity with N-bit subpixel resolution.
     *
     * The output map will have a size of exactly 4*width*height float values. For each
     * point the x, y and z coordinates are stored consecutively, plus one additional
     * float (four bytes) as padding. Invalid disparities will be set to the given minimum disparity.
     *
     * If the minimum disparity is set to 0, points with a disparity of 0 or an invalid
     * disparity will receive a z coordinate of +inf. If a larger minimum disparity is given,
     * points with a lower disparity will be at a fixed depth that corresponds to this
     * disparity.
     *
     * The returned point map is valid until the next call of createPointMap(), createZMap(), or
     * writePlyFile().
     */
    float* createPointMap(const ImageSet& imageSet, unsigned short minDisparity = 1);
    /**
     * \brief Converts the disparity in an image set to a depth map
     *
     * \param imageSet Image set containing the disparity map.
     * \param minDisparity The minimum disparity with N-bit subpixel resolution.
     * \param maxDisparity The maximum value that occurs in the disparity map. Any value
     *        greater or equal will be marked as invalid.
     *
     * The output map will have a size of exactly width*height float values. Each
     * value represents the depth at the given pixel coordinate in meters.
     *
     * This method is closely related to createPointMap(). It only computes the
     * Z coordinates, whereas createPointMap() also computes X and Y coordinates
     * for each image point.
     *
     * If the minimum disparity is set to 0, points with a disparity of 0 or an invalid
     * disparity will receive a z coordinate of +inf. If a larger minimum disparity is given,
     * points with a lower disparity will be at a fixed depth that corresponds to this
     * disparity.
     *
     * The returned map is valid until the next call of createZMap(), createPointMap() or
     * writePlyFile().
     */
    float* createZMap(const ImageSet& imageSet, unsigned short minDisparity = 1,
        unsigned short maxDisparity = 0xFFF);
    /**
     * \brief Reconstructs the 3D location of one individual point.
     *
     * \param imageX X component of the image location.
     * \param imageY Y component of the image location.
     * \param disparity Value of the disparity map at the image location.
     *        It is assumed that the lower N bits are the fractional component.
     *        This means that each value needs to be divided by a subpixel factor to
     *        receive the true disparity.
     * \param q Disparity-to-depth mapping matrix of size 4x4. The matrix is
     *        stored in a row-wise alignment. Obtain this matrix from your
     *        camera calibration data.
     * \param pointX Destination variable for the 3D point X component.
     * \param pointY Destination variable for the 3D point Y component.
     * \param pointZ Destination variable for the 3D point Z component.
     * \param subpixelFactor Subpixel division factor for disparity value.
     *
     * This method projects a single point from a disparity map to a
     * 3D location. If the 3D coordinates of multiple points are of interest,
     * createPointMap() should be used for best performance.
     */
    void projectSinglePoint(int imageX, int imageY, unsigned short disparity, const float* q,
        float& pointX, float& pointY, float& pointZ, int subpixelFactor = 16);
#ifndef DOXYGEN_SHOULD_SKIP_THIS
    /**
     * \brief Projects the given disparity map to 3D points and exports the result to
     * a PLY file.
     *
     * \param file The output file
     * \param dispMap Pointer to the data of the disparity map. The disparity map is
     *        assumed to have a N-bit subpixel resolution. This means that each
     *        value needs to be divided by a subpixel factor to receive the true disparity.
     * \param image Rectified left input image in 8-bit gray scale format.
     * \param width Width of the disparity map and left image.
     * \param height Height of the disparity map and left image.
     * \param format Pixel format for the left image.
     * \param dispRowStride Row stride (i.e. distance between two rows in bytes) of the
     *        disparity map.
     * \param imageRowStride Row stride (i.e. distance between two rows in bytes) of the
     *        left image.
     * \param q Disparity-to-depth mapping matrix of size 4x4. The matrix is
     *        stored in a row-wise alignment. Obtain this matrix from your
     *        camera calibration data.
     * \param maxZ Maximum allowed z-coordinate. Points with a higher z-coordinate
     *        are not exported.
     * \param binary Specifies whether the ASCII or binary PLY-format should be used.
     * \param subpixelFactor Subpixel division factor for disparity value.
     * \param maxDisparity The maximum value that occurs in the disparity map. Any value
     *        greater or equal will be marked as invalid.
     *
     * This method is deprecated in favor of
     * writePlyFile(const char*, const ImageSet&, double maxZ, bool);
     */
    DEPRECATED("Use writePlyFile(const char*, const ImageSet&, ...) instead.")
    void writePlyFile(const char* file, const unsigned short* dispMap,
        const unsigned char* image, int width, int height, ImageSet::ImageFormat format,
        int dispRowStride, int imageRowStride, const float* q,
        double maxZ = std::numeric_limits<double>::max(),
        bool binary = false, int subpixelFactor = 16, unsigned short maxDisparity = 0xFFF);
#endif
    /**
     * \brief Projects the given disparity map to 3D points and exports the result to
     * a PLY file.
     *
     * \param file The name for the output file.
     * \param imageSet Image set containing camera image and disparity map.
     * \param maxZ Maximum allowed z-coordinate.
     * \param binary Specifies whether the ASCII or binary PLY-format should be used.
     */
    void writePlyFile(const char* file, const ImageSet& imageSet,
        double maxZ = std::numeric_limits<double>::max(), bool binary = false);
#ifdef PCL_MAJOR_VERSION
    /**
     * \brief Projects the given disparity map to a PCL point cloud without pixel intensities
     *
     * \param imageSet Image set containing the disparity map.
     * \param frameId Frame ID that will be assigned to the created point cloud.
     * \param minDisparity The minimum disparity with N-bit subpixel resolution.
     *
     * For this method to be available, the PCL headers must be included before
     * the libvisiontransfer headers!
     *
     * If the minimum disparity is set to 0, points with a disparity of 0 or an invalid
     * disparity will receive a z coordinate of +inf. If a larger minimum disparity is given,
     * points with a lower disparity will be at a fixed depth that corresponds to this
     * disparity.
     */
    inline pcl::PointCloud<pcl::PointXYZ>::Ptr createXYZCloud(const ImageSet& imageSet,
        const char* frameId, unsigned short minDisparity = 0);
    /**
     * \brief Projects the given disparity map to a PCL point cloud, including pixel intensities.
     *
     * See createXYZCloud() for details.
     */
    inline pcl::PointCloud<pcl::PointXYZI>::Ptr createXYZICloud(const ImageSet& imageSet,
        const char* frameId, unsigned short minDisparity = 0);
    /**
     * \brief Projects the given disparity map to a PCL point cloud, including pixel RGB data.
     *
     * See createXYZCloud() for details.
     */
    inline pcl::PointCloud<pcl::PointXYZRGB>::Ptr createXYZRGBCloud(const ImageSet& imageSet,
        const char* frameId, unsigned short minDisparity = 0);
#endif
#ifdef OPEN3D_VERSION
    /**
     * \brief Projects the given disparity map to an Open3D point cloud
     *
     * \param imageSet Image set containing the disparity map.
     * \param withColor If true, color information will be copied to the
     *        point cloud.
     * \param minDisparity The minimum disparity with N-bit subpixel resolution.
     *
     * For this method to be available, the Open3D headers must be included before
     * the libvisiontransfer headers!
     *
     * If the minimum disparity is set to 0, points with a disparity of 0 or an invalid
     * disparity will receive a z coordinate of +inf. If a larger minimum disparity is given,
     * points with a lower disparity will be at a fixed depth that corresponds to this
     * disparity.
     */
    inline std::shared_ptr<open3d::geometry::PointCloud> createOpen3DCloud(const ImageSet& imageSet,
        bool withColor, unsigned short minDisparity = 0);
    /**
     * \brief Converts the given disparity map to an Open3D RGBD image
     *
     * \param imageSet Image set containing the disparity map.
     * \param minDisparity The minimum disparity with N-bit subpixel resolution.
     *
     * For this method to be available, the Open3D headers must be included before
     * the libvisiontransfer headers!
     *
     * If the minimum disparity is set to 0, points with a disparity of 0 or an invalid
     * disparity will receive a z coordinate of +inf. If a larger minimum disparity is given,
     * points with a lower disparity will be at a fixed depth that corresponds to this
     * disparity.
     */
    inline std::shared_ptr<open3d::geometry::RGBDImage> createOpen3DImageRGBD(const ImageSet& imageSet,
        unsigned short minDisparity = 0);
#endif
private:
    // We follow the pimpl idiom: all implementation state lives in Pimpl
    class Pimpl;
    Pimpl* pimpl;
    // This class cannot be copied
    Reconstruct3D(const Reconstruct3D& other);
    Reconstruct3D& operator=(const Reconstruct3D&);
#ifdef PCL_MAJOR_VERSION
    // Initializes a PCL point cloud before it is filled with points
    template <typename T>
    typename pcl::PointCloud<T>::Ptr initPointCloud(const ImageSet& imageSet, const char* frameId);
#endif
};
} // namespace
#include "visiontransfer/reconstruct3d-pcl.h"
#include "visiontransfer/reconstruct3d-open3d.h"
#endif

View file

@ -0,0 +1,23 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_SCENESCANPARAMETERS_H
#define VISIONTRANSFER_SCENESCANPARAMETERS_H
#include "visiontransfer/deviceparameters.h"
#pragma message "DEPRECATION NOTICE: scenescanparameters.h and SceneScanParameters are deprecated in favor of deviceparameteres.h and DeviceParameters"
#endif

View file

@ -0,0 +1,116 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_SENSORDATA_H
#define VISIONTRANSFER_SENSORDATA_H
#define _USE_MATH_DEFINES
#include <cmath>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace visiontransfer {
/**
* \brief Base class for sensor records with timestamp and status (reliability) fields
*/
class SensorRecord {
public:
    SensorRecord(int timestampSec, int timestampUSec, unsigned char status)
        : timestampSec(timestampSec), timestampUSec(timestampUSec), status(status) {}
    /**
     * Returns the sensor-reported timestamp of this reading as seconds
     * plus microseconds.
     */
    void getTimestamp(int& s, int& us) const {
        s = timestampSec;
        us = timestampUSec;
    }
    /**
     * Returns the current sensor calibration status (range 0..3)
     * 0: Sensor unreliable; 1: Accuracy low; 2: ~ medium; 3: ~ high
     */
    unsigned char getStatus() const {
        return status;
    }
protected:
    int timestampSec;
    int timestampUSec;
    unsigned char status;
};
/**
* \brief Encapsulate a scalar sensor measurement, containing the value, as well as timestamp and status fields
*/
class TimestampedScalar: public SensorRecord {
public:
    TimestampedScalar(int timestampSec, int timestampUSec, unsigned char status, double value)
        : SensorRecord(timestampSec, timestampUSec, status), storedValue(value) {}
    TimestampedScalar(): SensorRecord(0, 0, 0), storedValue(0) { }
    /** Returns the recorded scalar measurement value. */
    double value() const { return storedValue; }
private:
    double storedValue;  // the scalar measurement
};
/**
* \brief Encapsulate a 3D sensor report, containing X, Y, Z, as well as timestamp and status fields
*/
class TimestampedVector: public SensorRecord {
public:
    TimestampedVector(int timestampSec, int timestampUSec, unsigned char status, double x, double y, double z)
        : SensorRecord(timestampSec, timestampUSec, status), vecX(x), vecY(y), vecZ(z) {}
    TimestampedVector(): SensorRecord(0, 0, 0), vecX(0), vecY(0), vecZ(0) { }
    /** Returns the X component of the measurement. */
    double x() const { return vecX; }
    /** Returns the Y component of the measurement. */
    double y() const { return vecY; }
    /** Returns the Z component of the measurement. */
    double z() const { return vecZ; }
private:
    double vecX, vecY, vecZ;  // the 3D measurement components
};
/**
* \brief Encapsulate a 4D (quaternion) sensor report, containing X, Y, Z, W, as well as timestamp and status fields and measurement accuracy
*
* Component r is the real part of the quaternion, also called w (ijkr corresponds to xyzw).
*/
class TimestampedQuaternion: public SensorRecord {
public:
    // Quaternion components; w (also called r) is the real part
    double x() const { return xIntl; }
    double y() const { return yIntl; }
    double z() const { return zIntl; }
    double w() const { return wIntl; }
    /**
     * Convert the quaternion to device roll, pitch, and yaw (radians)
     *
     * NOTE(review): these are the standard quaternion-to-Euler formulas with
     * y and -z exchanged relative to the textbook form, presumably remapping
     * from the sensor frame to the device frame -- confirm against the
     * sensor mounting convention.
     */
    void getRollPitchYaw(double& roll, double& pitch, double& yaw) {
        // roll
        double sinr_cosp = 2 * (wIntl * xIntl + -zIntl * yIntl);
        double cosr_cosp = 1 - 2 * (xIntl * xIntl + -zIntl * -zIntl);
        roll = std::atan2(sinr_cosp, cosr_cosp);
        // pitch; clamped to +/- pi/2 when |sinp| >= 1 to avoid NaN from asin
        double sinp = 2 * (wIntl * -zIntl - yIntl * xIntl);
        pitch = (std::abs(sinp) >= 1) ? ((sinp<0)?-(M_PI/2):(M_PI/2)) : std::asin(sinp);
        // yaw
        double siny_cosp = 2 * (wIntl * yIntl + xIntl * -zIntl);
        double cosy_cosp = 1 - 2 * (-zIntl * -zIntl + yIntl * yIntl);
        yaw = std::atan2(siny_cosp, cosy_cosp);
    }
    /**
     * Returns the internal device-reported angular accuracy (radians)
     */
    double accuracy() const { return accuracyIntl; }
    TimestampedQuaternion(int timestampSec, int timestampUSec, unsigned char status, double x, double y, double z, double w, double accuracy): SensorRecord(timestampSec, timestampUSec, status), xIntl(x), yIntl(y), zIntl(z), wIntl(w), accuracyIntl(accuracy) {}
    TimestampedQuaternion(): SensorRecord(0, 0, 0), xIntl(0), yIntl(0), zIntl(0), wIntl(0), accuracyIntl(0) { }
private:
    double xIntl, yIntl, zIntl, wIntl;  // quaternion components (ijkr = xyzw)
    double accuracyIntl;                // device-reported accuracy, radians
};
} // namespace
#endif

View file

@ -0,0 +1,146 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_SENSORRINGBUFFER_H
#define VISIONTRANSFER_SENSORRINGBUFFER_H
#include <array>
#include <vector>
#include <chrono>
#include <thread>
#include <mutex>
#include <tuple>
#include <visiontransfer/sensordata.h>
// NOTE(review): a 'using namespace' directive in a header propagates the
// namespace into every translation unit that includes it; presumably
// unintentional here, since the code below opens namespace visiontransfer
// explicitly anyway -- consider removing.
using namespace visiontransfer;
namespace visiontransfer {
namespace internal {
/**
* Thread-safe ring buffer for timestamped generic sensor data.
* RecordType needs to implement getTimestamp() in order to perform
* comparisons in popBetweenTimes() (= obtain data series in interval).
*
* Maximum capacity of the buffer is RINGBUFFER_SIZE-1.
* lostSamples() tallies the number of samples silently lost due
* to buffer overruns, and is reset by any of the pop...() methods.
*/
template<typename RecordType, int RINGBUFFER_SIZE>
class SensorDataRingBuffer {
private:
    int read_horizon, write_position, read_next;
    // Samples silently dropped due to overruns since the last pop...() call.
    unsigned long lostSamples;
    std::array<RecordType, RINGBUFFER_SIZE> buffer;
    std::recursive_mutex mutex;

public:
    constexpr unsigned int ringbufferSize() const { return RINGBUFFER_SIZE; }
    SensorDataRingBuffer(): read_horizon(0), write_position(0), read_next(0), lostSamples(0) { }
    // One slot is sacrificed to distinguish "full" from "empty".
    constexpr int capacity() const { return ringbufferSize() - 1; }
    // Number of samples currently stored and not yet popped.
    int size() const { return (ringbufferSize() + (write_position - read_next)) % ringbufferSize(); }
    int samplesLost() const { return lostSamples; }
    bool isFull() const { return size()==capacity(); }
    bool isEmpty() const { return write_position==read_next; }

    // Advance the write cursor; on overrun the oldest unread sample is
    // dropped and counted. Returns true while no sample has been lost.
    bool advanceWritePosition() {
        write_position = (write_position + 1) % ringbufferSize();
        if (write_position==read_next) {
            // Ring buffer overrun: advance and increment lost samples count
            read_next = (write_position + 1) % ringbufferSize();
            lostSamples++;
        }
        return lostSamples==0;
    }

    /// \brief Push a batch of samples. Returns true while no sample has been lost.
    bool pushData(const std::vector<RecordType>& data) {
        // A more efficient implementation could be substituted on demand
        std::unique_lock<std::recursive_mutex> lock(mutex);
        for (auto const& d: data) {
            (void) pushData(d);
        }
        return lostSamples==0;
    }

    bool pushData(const RecordType& data) {
        std::unique_lock<std::recursive_mutex> lock(mutex);
        buffer[write_position] = data;
        return advanceWritePosition();
    }

    bool pushData(RecordType&& data) {
        std::unique_lock<std::recursive_mutex> lock(mutex);
        buffer[write_position] = std::move(data);
        return advanceWritePosition();
    }

    // \brief Pop and return the whole ring buffer contents
    std::vector<RecordType> popAllData() {
        std::unique_lock<std::recursive_mutex> lock(mutex);
        lostSamples = 0;
        if (write_position < read_next) {
            // wrapped
            std::vector<RecordType> v(buffer.begin()+read_next, buffer.end());
            v.reserve(v.size() + write_position);
            std::copy(buffer.begin(), buffer.begin() + write_position, std::back_inserter(v));
            read_next = (write_position) % ringbufferSize();
            return v;
        } else {
            std::vector<RecordType> v(buffer.begin()+read_next, buffer.begin()+write_position);
            read_next = (write_position) % ringbufferSize();
            return v;
        }
    }

    /// \brief Pop and return the data between timestamps (or the whole ring buffer contents if not provided)
    std::vector<RecordType> popBetweenTimes(int fromSec = 0, int fromUSec = 0, int untilSec = 0x7fffFFFFl, int untilUSec = 0x7fffFFFFl) {
        std::unique_lock<std::recursive_mutex> lock(mutex);
        lostSamples = 0;
        int tsSec, tsUSec;
        if (write_position == read_next) return std::vector<RecordType>();
        // Find first relevant sample (matching or exceeding the specified start time).
        // BUGFIX: the timestamp must be re-read after each advance, otherwise the
        // loop compared a stale timestamp and discarded the entire buffer.
        buffer[read_next].getTimestamp(tsSec, tsUSec);
        while ((tsSec < fromSec) || ((tsSec == fromSec) && (tsUSec < fromUSec))) {
            read_next = (read_next + 1) % ringbufferSize();
            if (write_position == read_next) return std::vector<RecordType>();
            buffer[read_next].getTimestamp(tsSec, tsUSec);
        }
        // Find one-past-last relevant sample (not exceeding the specified end time).
        // BUGFIX: same stale-timestamp problem as above; re-read before each test.
        int lastidx = read_next;
        buffer[lastidx].getTimestamp(tsSec, tsUSec);
        while ((tsSec < untilSec) || ((tsSec == untilSec) && (tsUSec <= untilUSec))) {
            lastidx = (lastidx + 1) % ringbufferSize();
            if (lastidx == write_position) break;
            buffer[lastidx].getTimestamp(tsSec, tsUSec);
        }
        if (lastidx < read_next) {
            // Wrapped
            std::vector<RecordType> v(buffer.begin()+read_next, buffer.end());
            v.reserve(v.size() + lastidx);
            std::copy(buffer.begin(), buffer.begin() + lastidx, std::back_inserter(v));
            read_next = lastidx;
            return v;
        } else {
            std::vector<RecordType> v(buffer.begin()+read_next, buffer.begin()+lastidx);
            read_next = (lastidx) % ringbufferSize();
            return v;
        }
    }
};
}} // namespace
#endif

View file

@ -0,0 +1,102 @@
#include <visiontransfer/standardparameterids.h>
namespace visiontransfer {
namespace internal {
/// Return the ID for a string configuration key (reverse lookup), or UNDEFINED if unknown
StandardParameterIDs::ParameterID StandardParameterIDs::getParameterIDForName(const std::string& name)
{
    // Reverse map built exactly once. C++11 guarantees thread-safe
    // initialization of function-local statics; the previous lazy
    // "if (!lookup.size()) fill" pattern could race if two threads
    // performed the first lookup concurrently.
    static const std::map<std::string, StandardParameterIDs::ParameterID> lookup = [] {
        std::map<std::string, StandardParameterIDs::ParameterID> m;
        for (const auto& kv: StandardParameterIDs::parameterNameByID) {
            m[kv.second] = kv.first;
        }
        return m;
    }();
    auto it = lookup.find(name);
    if (it==lookup.end()) return StandardParameterIDs::ParameterID::UNDEFINED;
    return it->second;
}
// String names for every ParameterID (lowercase form of the enumerator).
// This table also feeds the reverse lookup in getParameterIDForName(),
// so every value must be unique.
const std::map<StandardParameterIDs::ParameterID, std::string>
StandardParameterIDs::parameterNameByID {
    // Processing settings
    {OPERATION_MODE, "operation_mode"},
    {NUMBER_OF_DISPARITIES, "number_of_disparities"},
    {DISPARITY_OFFSET, "disparity_offset"},
    {MAX_NUMBER_OF_IMAGES, "max_number_of_images"},
    // Algorithmic settings
    {SGM_P1_EDGE, "sgm_p1_edge"},
    {SGM_P2_EDGE, "sgm_p2_edge"},
    {SGM_P1_NO_EDGE, "sgm_p1_no_edge"},
    {SGM_P2_NO_EDGE, "sgm_p2_no_edge"},
    {SGM_EDGE_SENSITIVITY, "sgm_edge_sensitivity"},
    {MASK_BORDER_PIXELS_ENABLED, "mask_border_pixels_enabled"},
    {CONSISTENCY_CHECK_ENABLED, "consistency_check_enabled"},
    {CONSISTENCY_CHECK_SENSITIVITY, "consistency_check_sensitivity"},
    {UNIQUENESS_CHECK_ENABLED, "uniqueness_check_enabled"},
    {UNIQUENESS_CHECK_SENSITIVITY, "uniqueness_check_sensitivity"},
    {TEXTURE_FILTER_ENABLED, "texture_filter_enabled"},
    {TEXTURE_FILTER_SENSITIVITY, "texture_filter_sensitivity"},
    {GAP_INTERPOLATION_ENABLED, "gap_interpolation_enabled"},
    {NOISE_REDUCTION_ENABLED, "noise_reduction_enabled"},
    {SPECKLE_FILTER_ITERATIONS, "speckle_filter_iterations"},
    {SUBPIXEL_OPTIMIZATION_ROI_ENABLED, "subpixel_optimization_roi_enabled"},
    {SUBPIXEL_OPTIMIZATION_ROI_X, "subpixel_optimization_roi_x"},
    {SUBPIXEL_OPTIMIZATION_ROI_Y, "subpixel_optimization_roi_y"},
    {SUBPIXEL_OPTIMIZATION_ROI_WIDTH, "subpixel_optimization_roi_width"},
    {SUBPIXEL_OPTIMIZATION_ROI_HEIGHT, "subpixel_optimization_roi_height"},
    // Exposure settings
    {AUTO_EXPOSURE_MODE, "auto_exposure_mode"},
    {AUTO_TARGET_INTENSITY, "auto_target_intensity"},
    {AUTO_INTENSITY_DELTA, "auto_intensity_delta"},
    {AUTO_TARGET_FRAME, "auto_target_frame"},
    {AUTO_SKIPPED_FRAMES, "auto_skipped_frames"},
    {AUTO_MAXIMUM_EXPOSURE_TIME, "auto_maximum_exposure_time"},
    {AUTO_MAXIMUM_GAIN, "auto_maximum_gain"},
    {MANUAL_EXPOSURE_TIME, "manual_exposure_time"},
    {MANUAL_GAIN, "manual_gain"},
    {AUTO_EXPOSURE_ROI_ENABLED, "auto_exposure_roi_enabled"},
    {AUTO_EXPOSURE_ROI_X, "auto_exposure_roi_x"},
    {AUTO_EXPOSURE_ROI_Y, "auto_exposure_roi_y"},
    {AUTO_EXPOSURE_ROI_WIDTH, "auto_exposure_roi_width"},
    {AUTO_EXPOSURE_ROI_HEIGHT, "auto_exposure_roi_height"},
    // Trigger / Pairing
    {MAX_FRAME_TIME_DIFFERENCE_MS, "max_frame_time_difference_ms"},
    {TRIGGER_FREQUENCY, "trigger_frequency"},
    {TRIGGER_0_ENABLED, "trigger_0_enabled"},
    {TRIGGER_0_PULSE_WIDTH, "trigger_0_pulse_width"},
    {TRIGGER_1_ENABLED, "trigger_1_enabled"},
    {TRIGGER_1_PULSE_WIDTH, "trigger_1_pulse_width"},
    {TRIGGER_1_OFFSET, "trigger_1_offset"},
    {TRIGGER_0B_PULSE_WIDTH, "trigger_0b_pulse_width"},
    {TRIGGER_0C_PULSE_WIDTH, "trigger_0c_pulse_width"},
    {TRIGGER_0D_PULSE_WIDTH, "trigger_0d_pulse_width"},
    {TRIGGER_1B_PULSE_WIDTH, "trigger_1b_pulse_width"},
    {TRIGGER_1C_PULSE_WIDTH, "trigger_1c_pulse_width"},
    {TRIGGER_1D_PULSE_WIDTH, "trigger_1d_pulse_width"},
    {TRIGGER_0_POLARITY, "trigger_0_polarity"},
    {TRIGGER_1_POLARITY, "trigger_1_polarity"},
    {TRIGGER_0E_PULSE_WIDTH, "trigger_0e_pulse_width"},
    {TRIGGER_0F_PULSE_WIDTH, "trigger_0f_pulse_width"},
    {TRIGGER_0G_PULSE_WIDTH, "trigger_0g_pulse_width"},
    {TRIGGER_0H_PULSE_WIDTH, "trigger_0h_pulse_width"},
    {TRIGGER_1E_PULSE_WIDTH, "trigger_1e_pulse_width"},
    {TRIGGER_1F_PULSE_WIDTH, "trigger_1f_pulse_width"},
    {TRIGGER_1G_PULSE_WIDTH, "trigger_1g_pulse_width"},
    {TRIGGER_1H_PULSE_WIDTH, "trigger_1h_pulse_width"},
    {TRIGGER_0_CONSTANT, "trigger_0_constant"},
    // BUGFIX: was "trigger_0_constant" (copy/paste duplicate), which made
    // the reverse lookup resolve "trigger_0_constant" to TRIGGER_1_CONSTANT
    // and left "trigger_1_constant" unresolvable.
    {TRIGGER_1_CONSTANT, "trigger_1_constant"},
    {TRIGGER_INPUT, "trigger_input"},
    // Auto Re-calibration
    {AUTO_RECALIBRATION_ENABLED, "auto_recalibration_enabled"},
    {AUTO_RECALIBRATION_PERMANENT, "auto_recalibration_permanent"},
    // System settings
    {REBOOT, "reboot"},
    {PPS_SYNC, "pps_sync"},
};
}} // namespace

View file

@ -0,0 +1,141 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#ifndef VISIONTRANSFER_STANDARDRAMETERIDS_H
#define VISIONTRANSFER_STANDARDRAMETERIDS_H
#include <map>
#include <string>
#include <stdint.h>
#include <visiontransfer/parameterinfo.h>
namespace visiontransfer {
namespace internal {
/**
* \brief A collection of numeric IDs for all supported parameters by
* Nerian stereo devices.
*
* This class is only used internally. Users should use the class
* \ref DeviceParameters instead.
*/
class StandardParameterIDs {
public:

    // NOTE(review): these numeric IDs appear to be exchanged with the device
    // (grouped by category in the high byte), so existing values presumably
    // must not be renumbered -- confirm before editing.
    enum ParameterID {
        // Reserved
        UNDEFINED                           = 0x0000,
        // Processing settings
        OPERATION_MODE                      = 0x0100,
        NUMBER_OF_DISPARITIES               = 0x0101, // Not available yet
        DISPARITY_OFFSET                    = 0x0102,
        MAX_NUMBER_OF_IMAGES                = 0x0103,
        // Algorithmic settings
        SGM_P1_EDGE                         = 0x0200,
        SGM_P2_EDGE                         = 0x0201,
        MASK_BORDER_PIXELS_ENABLED          = 0x0202,
        CONSISTENCY_CHECK_ENABLED           = 0x0203,
        CONSISTENCY_CHECK_SENSITIVITY       = 0x0204,
        UNIQUENESS_CHECK_ENABLED            = 0x0205,
        UNIQUENESS_CHECK_SENSITIVITY        = 0x0206,
        TEXTURE_FILTER_ENABLED              = 0x0207,
        TEXTURE_FILTER_SENSITIVITY          = 0x0208,
        GAP_INTERPOLATION_ENABLED           = 0x0209,
        NOISE_REDUCTION_ENABLED             = 0x020a,
        SPECKLE_FILTER_ITERATIONS           = 0x020b,
        SGM_P1_NO_EDGE                      = 0x020c,
        SGM_P2_NO_EDGE                      = 0x020d,
        SGM_EDGE_SENSITIVITY                = 0x020e,
        SUBPIXEL_OPTIMIZATION_ROI_ENABLED   = 0x020f,
        SUBPIXEL_OPTIMIZATION_ROI_X         = 0x0210,
        SUBPIXEL_OPTIMIZATION_ROI_Y         = 0x0211,
        SUBPIXEL_OPTIMIZATION_ROI_WIDTH     = 0x0212,
        SUBPIXEL_OPTIMIZATION_ROI_HEIGHT    = 0x0213,
        // Exposure settings
        AUTO_EXPOSURE_MODE                  = 0x0300,
        AUTO_TARGET_INTENSITY               = 0x0301,
        AUTO_INTENSITY_DELTA                = 0x0302,
        AUTO_TARGET_FRAME                   = 0x0303,
        AUTO_SKIPPED_FRAMES                 = 0x0304,
        AUTO_MAXIMUM_EXPOSURE_TIME          = 0x0305,
        AUTO_MAXIMUM_GAIN                   = 0x0306,
        MANUAL_EXPOSURE_TIME                = 0x0307,
        MANUAL_GAIN                         = 0x0308,
        AUTO_EXPOSURE_ROI_ENABLED           = 0x0309,
        AUTO_EXPOSURE_ROI_X                 = 0x030a,
        AUTO_EXPOSURE_ROI_Y                 = 0x030b,
        AUTO_EXPOSURE_ROI_WIDTH             = 0x030c,
        AUTO_EXPOSURE_ROI_HEIGHT            = 0x030d,
        // Trigger / Pairing
        MAX_FRAME_TIME_DIFFERENCE_MS        = 0x0400,
        TRIGGER_FREQUENCY                   = 0x0401,
        TRIGGER_0_ENABLED                   = 0x0402,
        TRIGGER_0_PULSE_WIDTH               = 0x0403,
        TRIGGER_1_ENABLED                   = 0x0404,
        TRIGGER_1_PULSE_WIDTH               = 0x0405,
        TRIGGER_1_OFFSET                    = 0x0406,
        TRIGGER_0B_PULSE_WIDTH              = 0x0407,
        TRIGGER_0C_PULSE_WIDTH              = 0x0408,
        TRIGGER_0D_PULSE_WIDTH              = 0x0409,
        TRIGGER_1B_PULSE_WIDTH              = 0x040a,
        TRIGGER_1C_PULSE_WIDTH              = 0x040b,
        TRIGGER_1D_PULSE_WIDTH              = 0x040c,
        TRIGGER_0_POLARITY                  = 0x040d,
        TRIGGER_1_POLARITY                  = 0x040e,
        TRIGGER_0E_PULSE_WIDTH              = 0x040f,
        TRIGGER_0F_PULSE_WIDTH              = 0x0410,
        TRIGGER_0G_PULSE_WIDTH              = 0x0411,
        TRIGGER_0H_PULSE_WIDTH              = 0x0412,
        TRIGGER_1E_PULSE_WIDTH              = 0x0413,
        TRIGGER_1F_PULSE_WIDTH              = 0x0414,
        TRIGGER_1G_PULSE_WIDTH              = 0x0415,
        TRIGGER_1H_PULSE_WIDTH              = 0x0416,
        TRIGGER_0_CONSTANT                  = 0x0417,
        TRIGGER_1_CONSTANT                  = 0x0418,
        TRIGGER_INPUT                       = 0x0419,
        // Auto Re-calibration
        AUTO_RECALIBRATION_ENABLED          = 0x0500,
        AUTO_RECALIBRATION_PERMANENT        = 0x0501,
        // System settings
        REBOOT                              = 0x0600,
        PPS_SYNC                            = 0x0601
    };

    enum ParameterFlags {
        // bit flags
        PARAMETER_WRITEABLE = 0x0001,
    };

    // String representations for all ParameterIDs. They correspond
    // to a lowercase version, OPERATION_MODE <-> "operation_mode";
    // contents initialized C++11 style over in the source file
    static const std::map<ParameterID, std::string> parameterNameByID;

    // Obtain the ParameterID for a parameter name, or UNDEFINED if invalid
    static ParameterID getParameterIDForName(const std::string& name);
};
}} // namespace
#endif

View file

@ -0,0 +1,179 @@
/*******************************************************************************
* Copyright (c) 2021 Nerian Vision GmbH
* Copyright (c) 2022 Swinburne University of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*******************************************************************************/
#include <opencv2/opencv.hpp>
// PCL headers must be included first!
#include <pcl/pcl_base.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/extract_indices.h>
#include <visiontransfer/deviceenumeration.h>
#include <visiontransfer/asynctransfer.h>
#include <visiontransfer/imageset.h>
#include <visiontransfer/reconstruct3d.h>
#include <iostream>
#include <exception>
#include <stdio.h>
#include <senshamart/client/camera.hpp>
#include <thread>
#include <chrono>
#include <unistd.h>
#include <netinet/in.h>
#ifdef _MSC_VER
// Visual studio does not come with snprintf
#define snprintf _snprintf_s
#endif
using namespace visiontransfer;
namespace {
constexpr auto delta = std::chrono::seconds{ 1 } / 25;
}
int main(int argc, const char** argv) {
if (argc < 4) {
fprintf(stderr, "Expected %s <broker endpoint> <camera sensor name> <gps sensor name>\n", argv[0]);
return -1;
}
const char* const broker_endpoint = argv[1];
const char* const camera_sensor_name = argv[2];
const char* const gps_sensor_name = argv[3];
// Init AWS Code
senshamart::Camera_info init_info;
// Name, same as the KVS cloud. It is the hostname.
init_info.width = 1024;
init_info.height = 768;
init_info.broker_endpoint = broker_endpoint;
init_info.camera_sensor_name = camera_sensor_name;
init_info.gps_sensor_name = gps_sensor_name;
senshamart::Camera camera{ init_info };
int in;
senshamart::Clock::time_point now = senshamart::Clock::now();
std::cout << "[Nerian] Initialisation done" << std::endl;
// UDP socket to receive GNSS data
int sockfd, n;
struct sockaddr_in servaddr, cliaddr;
socklen_t len;
char gnssCoord[35] = "-0,-0,-0"; // Weird initialisation to indicate there is no GNSS data
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(32100);
bind(sockfd, (struct sockaddr*)&servaddr, sizeof(servaddr));
try {
// Search for Nerian stereo devices
DeviceEnumeration deviceEnum;
DeviceEnumeration::DeviceList devices = deviceEnum.discoverDevices();
int cameraWaitingCycles = 0;
while (devices.size() == 0) {
// GG is shit because it does not restart services if they fail more than 3 times.
printf("[Nerian] No devices discovered! Waiting 0.5s and trying again. Total waiting time: %.1f seconds.\n", cameraWaitingCycles * 0.5);
std::this_thread::sleep_for(std::chrono::milliseconds(500));
devices = deviceEnum.discoverDevices();
cameraWaitingCycles++;
//return -1;
}
// Print devices
std::cout << "[Nerian] Discovered devices:" << std::endl;
for (unsigned int i = 0; i < devices.size(); i++) {
std::cout << "[Nerian] " << devices[i].toString() << std::endl;
}
std::cout << std::endl;
// Create an image transfer object that receives data from the first detected device
// and get the status of the camera
AsyncTransfer asyncTransfer(devices[0]);
auto status = devices[0].getStatus();
// Variables to calculate fps
std::chrono::time_point<std::chrono::system_clock> timeNewFrame, timeOldFrame;
// Receive and send images
while (true) {
// GNSS stuff
len = sizeof(cliaddr);
n = recvfrom(sockfd, gnssCoord, 35, MSG_DONTWAIT, (struct sockaddr*)&cliaddr, &len);
if (n > 0) {
gnssCoord[n] = 0;
printf("[GNSS] Received the following: ");
printf("%s", gnssCoord);
std::stringstream ss;
ss << gnssCoord;
std::string lat, lon, speed;
getline(ss, lat, ',');
getline(ss, lon, ',');
getline(ss, speed, ',');
printf("[Nerian] Sending GPS and Speed: %lf %lf %lf \n", std::stod(lat), std::stod(lon), std::stod(speed));
camera.add_gps(senshamart::Latitude{std::stod(lat)}, senshamart::Longitude{std::stod(lon)}, std::stod(speed));
} else {
printf("[Nerian] Not sending GPS and Speed\n");
}
// Receive image
ImageSet imageSet;
while (!asyncTransfer.collectReceivedImageSet(imageSet, 0.1 /*timeout*/)) {
// FIXME: Blocking code that we are not logging/handling. It needs testing
// Keep on trying until reception is successful
}
// Compute frame rate
timeOldFrame = timeNewFrame;
timeNewFrame = std::chrono::system_clock::now();
std::chrono::duration<double> elapsedSeconds = timeNewFrame - timeOldFrame;
std::cout << "[Nerian] Receiving image set at " << 1 / elapsedSeconds.count() << " fps" << std::endl;
// Nerian Camera Stuff
// Write only image 1, so we don't care about the other images. The other images are disparity maps
// Sending frames here
cv::Mat convertedImage;
imageSet.toOpenCVImage(0, convertedImage); // Converting image 0 which is RGB
camera.add_frame(convertedImage); // Sending RGB image in cv::Mat format
std::this_thread::sleep_until(now + delta);
now += delta;
}
} catch (const std::exception& ex) {
std::cerr << "Exception occurred: " << ex.what() << std::endl;
}
return 0;
}

View file

@ -0,0 +1,74 @@
#pragma once
#include <cstddef>
#include <chrono>
#include <utility>
#include <memory>
#include <string>
#include <optional>
#include <opencv2/opencv.hpp>
namespace senshamart {
  // All timestamps handed to this API use the system clock.
  using Clock = std::chrono::system_clock;

  //strong type for latitude
  struct Latitude {
    double val;
  };
  //strong type for longitude
  struct Longitude {
    double val;
  };

  // Construction parameters for Camera. width/height are the resolution of
  // every frame later passed to add_frame; the sensor names select the
  // broker channels for video and GPS respectively.
  struct Camera_info {
    std::size_t width;
    std::size_t height;
    std::string broker_endpoint;
    std::string camera_sensor_name;
    std::string gps_sensor_name;
  };

  // Move-only pimpl facade over the video-encoding/publishing implementation.
  class Camera final {
  public:
    //init with the init info, may throw if there's an error. width and height are the resolution of the expected frames
    Camera(Camera_info const&);
    Camera(Camera const&) = delete;
    Camera(Camera&&) = default;
    Camera& operator=(Camera const&) = delete;
    Camera& operator=(Camera&&) = default;

    //add a frame, expects it raw
    //will check to see if frame is expected size to match resolution in constructor
    void add_frame(void* data, std::size_t size);
    //add a frame, expects it raw
    //will check to see if frame is expected size to match resolution in constructor
    void add_frame(void* data, std::size_t size, Clock::time_point time);
    //add a frame, expects it raw, helper to automatically add time
    //will check to see if frame is expected size to match resolution in constructor
    void add_frame(cv::Mat const& frame);
    //add a frame, expects it raw
    //will check to see if frame is expected size to match resolution in constructor
    void add_frame(cv::Mat const& frame, Clock::time_point time);

    //adds the gps location and speed, helper to automatically add time
    // Perfect-forwards so callers may pass (Latitude, Longitude) or
    // (Longitude, Latitude); the strong types select the right overload below.
    template<typename First, typename Second>
    void add_gps(First&& first, Second&& second, double speed) {
      add_gps(std::forward<First>(first), std::forward<Second>(second), speed, Clock::now());
    }

    //adds the gps location and speed
    void add_gps(Latitude latitude, Longitude longitude, double speed, Clock::time_point time);
    void add_gps(Longitude longitude, Latitude latitude, double speed, Clock::time_point time);
  private:
    // Type-erased deleter so the implementation type stays out of this header.
    struct Pimpl_deleter_ {
      void operator()(void*) const noexcept;
    };
    std::unique_ptr<void, Pimpl_deleter_> pimpl_;
  };
}

View file

@ -0,0 +1,70 @@
#include <senshamart/client/camera.hpp>
#include <thread>
#include <vector>
#include <set>
#include <fstream>
#include <filesystem>
namespace {
constexpr auto delta = std::chrono::seconds{ 1 } / 20;
}
int main(int argc, const char** argv) {
senshamart::Camera_info init_info;
init_info.broker_endpoint = "tcp://127.0.0.1:5004";
init_info.camera_sensor_name = "camera_sensor";
init_info.gps_sensor_name = "gps_sensor";
init_info.width = 1024;
init_info.height = 768;
senshamart::Camera client{ init_info };
int in;
senshamart::Clock::time_point now = senshamart::Clock::now();
std::set<std::filesystem::path> image_paths;
for(std::filesystem::directory_iterator directory_iter{ "C:\\users\\dekibeki\\documents\\png_02\\" };
directory_iter != std::filesystem::directory_iterator{};
++directory_iter) {
if(image_paths.size() > 100) {
break;
}
image_paths.insert(directory_iter->path());
}
std::vector<cv::Mat> images;
std::vector<char> temp;
for(auto const& path : image_paths) {
std::ifstream file{ path, std::ios_base::binary };
while((in = file.get()) != std::ifstream::traits_type::eof()) {
temp.push_back(static_cast<char>(in));
}
images.emplace_back(cv::imdecode(temp, cv::IMREAD_COLOR));
temp.clear();
}
// Sending frames here
fprintf(stderr, "Starting streaming\n");
std::size_t frame_count = 0;
for(;;) {
for(auto& image : images) {
fprintf(stderr, "Sending frame %zd\n", frame_count);
client.add_frame(image);
client.add_gps(senshamart::Latitude{ 0 }, senshamart::Longitude{ 0 }, 0);
//std::this_thread::sleep_until(now + delta);
now += delta;
++frame_count;
}
fprintf(stderr, "Repeating\n");
}
}

View file

@ -0,0 +1,320 @@
#include <senshamart/client/camera.hpp>
#include <senshamart/senshamart_client.hpp>
#include <stdexcept>
#include <vector>
#include <opencv2/opencv.hpp>
#include <random>
#include <deque>
#include <climits>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
}
namespace {
// RAII deleters so raw FFmpeg objects can be owned by std::unique_ptr.
struct AVFrame_closer {
    void operator()(AVFrame* p) const noexcept {
        av_frame_free(&p);
    }
};
using Frame = std::unique_ptr<AVFrame, AVFrame_closer>;

struct AVPacket_closer {
    void operator()(AVPacket* p) const noexcept {
        av_packet_free(&p);
    }
};
using Packet = std::unique_ptr<AVPacket, AVPacket_closer>;

struct AVCodecContext_closer {
    void operator()(AVCodecContext* p) const noexcept {
        avcodec_free_context(&p);
    }
};
using Codec_context = std::unique_ptr<AVCodecContext, AVCodecContext_closer>;

struct AVFormatContext_closer {
    void operator()(AVFormatContext* p) const noexcept {
        // The AVIOContext and its buffer were allocated by us in video_init_,
        // so they must be released before freeing the format context.
        if (p != nullptr && p->pb != nullptr) {
            if (p->pb->buffer != nullptr) {
                av_free(p->pb->buffer);
            }
            avio_context_free(&p->pb);
        }
        avformat_free_context(p);
    }
};
using Format_context = std::unique_ptr<AVFormatContext, AVFormatContext_closer>;
// Implementation behind senshamart::Camera: encodes BGR frames to VP9,
// muxes them into a live WebM/DASH byte stream, and publishes each
// keyframe-delimited segment via the camera MQTT client; GPS fixes are
// published as JSON via the gps client.
struct Camera_impl final {
public:
    //interface public
    Camera_impl(senshamart::Camera_info const& init_info) :
        camera_client_(init_info.broker_endpoint, init_info.camera_sensor_name),
        gps_client_(init_info.broker_endpoint, init_info.gps_sensor_name) {
        video_init_(init_info);
    }

    // Encode one raw frame and publish any finished segments.
    // NOTE(review): `time` is unused here; pts comes from the running frame
    // count at the configured 25 fps time base -- confirm this is intended.
    void add_frame(void* data, std::size_t size, senshamart::Clock::time_point time) {
        //we assume the data is B G R, one byte per
        assert(size == 3 * width_ * height_);
        cv::Mat frame{ static_cast<int>(height_), static_cast<int>(width_), CV_8UC3, data};
        out_frame_->pts = in_frame_count_;
        if (av_frame_make_writable(out_frame_.get()) < 0) {
            fprintf(stderr, "Couldn't make frame writable\n");
            std::exit(-1);
        }
        cv::Mat cv_frame_converted;
        cv::cvtColor(frame, cv_frame_converted, cv::COLOR_BGR2YUV_I420);
        // Copy the Y plane row by row (linesize may exceed width due to padding)
        for (int y = 0; y < height_; ++y) {
            for (int x = 0; x < width_; ++x) {
                out_frame_->data[0][y * out_frame_->linesize[0] + x] = cv_frame_converted.at<uint8_t>(y, x);
            }
        }
        // Repack the U and V planes from OpenCV's I420 layout (stored as extra
        // rows of the full-width Mat below the Y plane) into the half-width
        // AVFrame chroma planes.
        for (int i = 0; i < width_ * height_ / 4; ++i) {
            const int to_x = i % (width_ / 2);
            const int to_y = i / (width_ / 2);
            const int from_x = i / width_;
            const int from_y = i % width_;
            out_frame_->data[1][to_y * out_frame_->linesize[1] + to_x] = cv_frame_converted.at<uint8_t>(height_ + from_x, from_y);
            out_frame_->data[2][to_y * out_frame_->linesize[2] + to_x] = cv_frame_converted.at<uint8_t>((height_ / 4) * 5 + from_x, from_y);
        }
        if (avcodec_send_frame(codec_ctx_.get(), out_frame_.get()) < 0) {
            throw std::runtime_error{ "Couldn't send frame" };
        }
        ++in_frame_count_;
        // Drain every packet the encoder has ready and feed it to the muxer
        while (avcodec_receive_packet(codec_ctx_.get(), packet_.get()) >= 0) {
            write_encoded_(
                packet_.get());
            av_packet_unref(packet_.get());
        }
    }

    // Publish one GPS fix as a JSON object ("when" is in whole seconds).
    void add_gps(senshamart::Longitude longitude, senshamart::Latitude latitude, double speed, senshamart::Clock::time_point time) {
        std::stringstream constructing;
        constructing <<
            "{"
            "\"latitude\":" << latitude.val << ","
            "\"longitude\":" << longitude.val << ","
            "\"speed\":" << speed << ","
            "\"when\":" << std::chrono::duration_cast<std::chrono::seconds>(time.time_since_epoch()).count() <<
            "}";
        gps_client_.send(constructing.str());
    }

    // NOTE(review): intentionally empty -- no encoder flush / trailer write
    // happens on shutdown, so the last partial segment is dropped. Confirm.
    void finish() {
    }

    ~Camera_impl() {
        finish();
    }

private:
    // Feed one encoded packet to the muxer. A keyframe marks the start of a
    // new segment: the buffered bytes of the previous segment are flushed and
    // published before the keyframe is written.
    void write_encoded_(AVPacket* packet) {
        if ((packet->flags & AV_PKT_FLAG_KEY) != 0) {
            //flush
            av_write_frame(fmt_ctx_.get(), nullptr);
            //send
            if (!buffer_.empty()) {
                camera_client_.send(std::move(buffer_));
                buffer_.clear();
            }
        }
        av_packet_rescale_ts(packet, codec_ctx_->time_base, vid_stream_->time_base);
        packet->stream_index = 0;
        av_write_frame(fmt_ctx_.get(), packet);
    }

    // Set up the VP9 encoder and the live WebM muxer; writes the container
    // header bytes to ./video_header for consumers that join mid-stream.
    void video_init_(senshamart::Camera_info const& init_info) {
        //encoding
        width_ = init_info.width;
        height_ = init_info.height;

        codec_ = avcodec_find_encoder_by_name("libvpx-vp9");
        if (codec_ == nullptr) {
            throw std::runtime_error{ "Couldn't find codec" };
        }

        codec_ctx_.reset(avcodec_alloc_context3(codec_));
        if (codec_ctx_ == nullptr) {
            throw std::runtime_error{ "Couldn't open codec context" };
        }

        // 25 fps input; keyframe (= segment boundary) forced every 25 frames
        codec_ctx_->time_base = AVRational{ 1,25 };
        codec_ctx_->framerate = AVRational{ 25,1 };
        codec_ctx_->width = width_;
        codec_ctx_->height = height_;
        codec_ctx_->gop_size = 25;
        codec_ctx_->keyint_min = 25;
        codec_ctx_->max_b_frames = 1;
        codec_ctx_->pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV420P;

        if (avcodec_open2(codec_ctx_.get(), codec_, nullptr) < 0) {
            throw std::runtime_error{ "Couldn't open codec" };
        }

        out_frame_.reset(av_frame_alloc());
        if (out_frame_ == nullptr) {
            throw std::runtime_error{ "Couldn't open frame" };
        }
        out_frame_->width = width_;
        out_frame_->height = height_;
        out_frame_->format = codec_ctx_->pix_fmt;

        if (av_frame_get_buffer(out_frame_.get(), 0) < 0) {
            throw std::runtime_error{ "Couldn't make frame buffer" };
        }

        //muxing
        fmt_ctx_.reset(avformat_alloc_context());
        if (fmt_ctx_ == nullptr) {
            fprintf(stderr, "Couldn't create out fmt ctx\n");
            std::exit(-1);
        }

        fmt_ctx_->oformat = av_guess_format(nullptr, ".webm", nullptr);
        if (fmt_ctx_->oformat == nullptr) {
            fprintf(stderr, "Couldn't find format for .webm\n");
            std::exit(-1);
        }

        // Custom write callback: muxed bytes accumulate in buffer_ instead of
        // going to a file (see mux_cb_ below).
        fmt_ctx_->pb = avio_alloc_context(static_cast<unsigned char*>(av_malloc(4096)), 4096, 1,
            this, nullptr, &static_mux_cb_, nullptr);

        vid_stream_ = avformat_new_stream(fmt_ctx_.get(), codec_);
        if (vid_stream_ == nullptr) {
            fprintf(stderr, "Couldn't make stream\n");
            std::exit(-1);
        }

        vid_stream_->time_base = codec_ctx_->time_base;
        vid_stream_->r_frame_rate = { 5,1 };
        vid_stream_->avg_frame_rate = { 5,1 };

        if (avcodec_parameters_from_context(vid_stream_->codecpar, codec_ctx_.get()) < 0) {
            fprintf(stderr, "Couldn't set codecpar\n");
            std::exit(-1);
        }

        if (avformat_init_output(fmt_ctx_.get(), nullptr) < 0) {
            fprintf(stderr, "Could not init output fmt\n");
            std::exit(-1);
        }

        // Live/DASH mode so the muxer emits self-contained clusters
        av_opt_set(fmt_ctx_->priv_data, "dash", "1", 0);
        av_opt_set(fmt_ctx_->priv_data, "live", "1", 0);

        if (avformat_write_header(fmt_ctx_.get(), nullptr) < 0) {
            fprintf(stderr, "Couldn't write header\n");
            std::exit(-1);
        }

        if (av_write_frame(fmt_ctx_.get(), nullptr) < 0) {
            fprintf(stderr, "Couldn't flush header\n");
            std::exit(-1);
        }

        // Persist the container header; viewers prepend it to any segment
        // (see demo_show_video). NOTE(review): fopen result is not checked.
        FILE* header = fopen("./video_header", "wb");
        for (char c : buffer_) {
            fputc(c, header);
        }
        fclose(header);

        buffer_.clear();
    }

    // AVIO write callback target: append the muxed bytes to buffer_.
    int mux_cb_(uint8_t* data, int size) noexcept {
        buffer_.resize(buffer_.size() + size);
        memcpy(buffer_.data() + buffer_.size() - size, data, size);
        return 0;
    }

    static int static_mux_cb_(void* opaque, uint8_t* p, int size) noexcept {
        return static_cast<Camera_impl*>(opaque)->mux_cb_(p, size);
    }

    //video stuff
    std::size_t width_;
    std::size_t height_;
    //encoding
    int64_t in_frame_count_ = 0;        // monotone pts counter
    const AVCodec* codec_ = nullptr;
    Codec_context codec_ctx_;
    Packet packet_{ av_packet_alloc() };
    Frame out_frame_;
    //muxing
    AVStream* vid_stream_ = nullptr;
    Format_context fmt_ctx_;
    std::string buffer_;                // bytes of the segment currently being muxed
    //mqtt
    senshamart::Client camera_client_;
    senshamart::Client gps_client_;
};
}
//client
// Constructs the hidden implementation; may throw (codec/muxer setup or
// broker connection failures propagate from Camera_impl).
senshamart::Camera::Camera(Camera_info const& init_info) :
    pimpl_(new Camera_impl(init_info)) {
    assert(pimpl_ != nullptr);
}
void senshamart::Camera::add_frame(void* data, std::size_t size) {
add_frame(data, size, Clock::now());
}
void senshamart::Camera::add_frame(void* data, std::size_t size, Clock::time_point time) {
static_cast<Camera_impl*>(pimpl_.get())->add_frame(data, size, time);
}
// Convenience overload: stamp the frame with the current time.
void senshamart::Camera::add_frame(cv::Mat const& cv_frame) {
    const Clock::time_point arrival = Clock::now();
    add_frame(cv_frame, arrival);
}
// Forward an OpenCV frame to the implementation. The encoder expects one
// contiguous pixel buffer, so non-contiguous Mats are copied first.
void senshamart::Camera::add_frame(cv::Mat const& cv_frame, Clock::time_point time) {
    auto* const impl = static_cast<Camera_impl*>(pimpl_.get());
    if (cv_frame.isContinuous()) {
        impl->add_frame(cv_frame.data, cv_frame.elemSize() * cv_frame.total(), time);
        return;
    }
    cv::Mat contiguous;
    cv_frame.copyTo(contiguous);
    assert(contiguous.isContinuous());
    impl->add_frame(contiguous.data, contiguous.elemSize() * contiguous.total(), time);
}
// (Latitude, Longitude) order: delegates to the (Longitude, Latitude)
// overload below -- the strong types make the argument swap safe.
void senshamart::Camera::add_gps(Latitude latitude, Longitude longitude, double speed, Clock::time_point time) {
    add_gps(longitude, latitude, speed, time);
}
// Publish a GPS fix (position + speed + timestamp) via the implementation.
void senshamart::Camera::add_gps(Longitude longitude, Latitude latitude, double speed, Clock::time_point time) {
    static_cast<Camera_impl*>(pimpl_.get())->add_gps(longitude, latitude, speed, time);
}
// Deletes the type-erased implementation pointer. (delete on nullptr would
// be a no-op anyway; the guard just makes the intent explicit.)
void senshamart::Camera::Pimpl_deleter_::operator()(void* p) const noexcept {
    auto* const impl = static_cast<Camera_impl*>(p);
    if (impl != nullptr) {
        delete impl;
    }
}

View file

@ -0,0 +1,22 @@
cmake_minimum_required (VERSION 3.8)

# Dependencies: OpenCV (decode/display), Paho MQTT C++ (broker transport),
# FFmpeg (located via the bundled FindFFmpeg.cmake helper).
find_package(OpenCV REQUIRED)
find_package(PahoMqttCpp CONFIG REQUIRED)
include(FindFFmpeg.cmake)

add_executable(demo_show_video
  "src/demo_show_video.cpp")

target_include_directories(demo_show_video PUBLIC
  ${OpenCV_INCLUDE_DIRS}
  ${FFMPEG_INCLUDE_DIRS})

target_link_libraries(demo_show_video PRIVATE
  ${FFMPEG_LIBRARIES}
  ${OpenCV_LIBRARIES}
  PahoMqttCpp::paho-mqttpp3)

# std::filesystem et al. require C++17
target_compile_features(demo_show_video PUBLIC
  cxx_std_17)

View file

@ -0,0 +1,39 @@
# rules for finding the FFmpeg libraries
# Sets FFMPEG_INCLUDE_DIRS, FFMPEG_LIBRARIES and FFMPEG_FOUND.
if(WIN32)
  # On Windows (e.g. vcpkg) an FFMPEG config package is available directly
  find_package(FFMPEG)
else()
  # Elsewhere, locate each component through pkg-config
  find_package(PkgConfig REQUIRED)
  pkg_check_modules(PC_FFMPEG REQUIRED libavformat libavcodec libavutil libswscale)

  find_path(AVFORMAT_INCLUDE_DIR libavformat/avformat.h HINTS ${PC_FFMPEG_LIBAVFORMAT_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
  find_library(AVFORMAT_LIBRARY NAMES libavformat avformat HINTS ${PC_FFMPEG_LIBAVFORMAT_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})

  find_path(AVCODEC_INCLUDE_DIR libavcodec/avcodec.h HINTS ${PC_FFMPEG_LIBAVCODEC_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
  find_library(AVCODEC_LIBRARY NAMES libavcodec avcodec HINTS ${PC_FFMPEG_LIBAVCODEC_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})

  find_path(AVUTIL_INCLUDE_DIR libavutil/avutil.h HINTS ${PC_FFMPEG_LIBAVUTIL_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
  find_library(AVUTIL_LIBRARY NAMES libavutil avutil HINTS ${PC_FFMPEG_LIBAVUTIL_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})

  find_path(SWSCALE_INCLUDE_DIR libswscale/swscale.h HINTS ${PC_FFMPEG_LIBSWSCALE_INCLUDEDIR} ${PC_FFMPEG_INCLUDE_DIRS})
  # BUGFIX: first candidate name was misspelled "libawscale", so the
  # library could only ever be found under the bare "swscale" name.
  find_library(SWSCALE_LIBRARY NAMES libswscale swscale HINTS ${PC_FFMPEG_LIBSWSCALE_LIBDIR} ${PC_FFMPEG_LIBRARY_DIRS})

  include(FindPackageHandleStandardArgs)
  find_package_handle_standard_args(AVFormat DEFAULT_MSG AVFORMAT_LIBRARY AVFORMAT_INCLUDE_DIR)
  find_package_handle_standard_args(AVCodec DEFAULT_MSG AVCODEC_LIBRARY AVCODEC_INCLUDE_DIR)
  find_package_handle_standard_args(AVUtil DEFAULT_MSG AVUTIL_LIBRARY AVUTIL_INCLUDE_DIR)
  find_package_handle_standard_args(SWScale DEFAULT_MSG SWSCALE_LIBRARY SWSCALE_INCLUDE_DIR)

  mark_as_advanced(AVFORMAT_INCLUDE_DIR AVFORMAT_LIBRARY)
  mark_as_advanced(AVCODEC_INCLUDE_DIR AVCODEC_LIBRARY)
  mark_as_advanced(AVUTIL_INCLUDE_DIR AVUTIL_LIBRARY)
  mark_as_advanced(SWSCALE_INCLUDE_DIR SWSCALE_LIBRARY)

  set(FFMPEG_INCLUDE_DIRS ${AVFORMAT_INCLUDE_DIR} ${AVCODEC_INCLUDE_DIR} ${AVUTIL_INCLUDE_DIR} ${SWSCALE_INCLUDE_DIR})
  set(FFMPEG_LIBRARIES ${AVFORMAT_LIBRARY} ${AVCODEC_LIBRARY} ${AVUTIL_LIBRARY} ${SWSCALE_LIBRARY})

  if(${AVFORMAT_FOUND} AND ${AVCODEC_FOUND} AND ${AVUTIL_FOUND} AND ${SWSCALE_FOUND})
    set(FFMPEG_FOUND TRUE)
  else()
    set(FFMPEG_FOUND FALSE)
  endif()
endif()

View file

@ -0,0 +1,78 @@
#include <cstdio>
#include <vector>
#include <deque>
#include <fstream>
#include <thread>
#include <opencv2/opencv.hpp>
#include "mqtt/client.h"
// Demo video viewer: subscribes to an MQTT channel carrying WebM segment
// payloads, splices a saved WebM header onto each segment, and plays the
// resulting clip with OpenCV.
//
// Usage: <prog> <header location> <broker location> <channel name>
// Returns -1 on bad arguments or a missing header file; otherwise loops forever.
int main(int argc, const char** argv) {
  if (argc <= 3) {
    fprintf(stderr, "Expected %s <header location> <broker location> <channel name>\n", argv[0]);
    return -1;
  }

  const char* const header_location = argv[1];
  const char* const broker_location = argv[2];
  const char* const channel_name = argv[3];

  // Read the shared WebM header that every received segment is appended to.
  // Fix: bytes are stored as char directly; the previous std::uint8_t cast
  // relied on <cstdint>, which was never included.
  std::vector<char> header;
  {
    std::ifstream file{ header_location, std::ios_base::binary };
    int in = 0;
    while ((in = file.get()) != std::ifstream::traits_type::eof()) {
      header.push_back(static_cast<char>(in));
    }
  }

  if (header.empty()) {
    fprintf(stderr, "Empty/non-existent header at `%s`", header_location);
    return -1;
  }

  mqtt::client mqtt_client(broker_location, "demo show video");

  auto connOpts = mqtt::connect_options_builder()
    .keep_alive_interval(std::chrono::seconds(30))
    .automatic_reconnect(std::chrono::seconds(2), std::chrono::seconds(30))
    .clean_session(false)
    .finalize();

  for (;;) {
    mqtt::connect_response rsp = mqtt_client.connect(connOpts);
    if (!rsp.is_session_present()) {
      mqtt_client.subscribe(channel_name);
    }
    for (std::size_t i = 0;; ++i) {
      auto msg = mqtt_client.consume_message();
      if (msg) {
        // Fix: %zu is the correct conversion for size_t (%zd is for ssize_t).
        fprintf(stderr, "New segment: %zu\n", i);
        const mqtt::string data = msg->to_string();

        // Write header + segment to a temp file that OpenCV can open.
        FILE* temp_file = fopen("./test.webm", "wb");
        if (temp_file == nullptr) {
          // Fix: fopen was previously unchecked; fwrite on a null FILE*
          // would crash. Skip this segment and keep consuming.
          fprintf(stderr, "Could not open ./test.webm for writing\n");
          continue;
        }
        fwrite(header.data(), 1, header.size(), temp_file);
        fwrite(data.data(), 1, data.size(), temp_file);
        fclose(temp_file);

        cv::VideoCapture reader("./test.webm");
        cv::Mat frame;
        while (reader.read(frame)) {
          cv::imshow(channel_name, frame);
          cv::waitKey(1000 / 5); // ~5 fps playback pacing
        }
      } else if (!mqtt_client.is_connected()) {
        fprintf(stderr, "No connection, sleeping\n");
        while (!mqtt_client.is_connected()) {
          std::this_thread::sleep_for(std::chrono::milliseconds(250));
        }
        fprintf(stderr, "Reconnected\n");
      }
    }
  }
}

View file

@ -0,0 +1,42 @@
cmake_minimum_required(VERSION 3.8)

# HTTPS client support: Boost (asio/beast) plus OpenSSL.
find_package(Boost REQUIRED)
find_package(OpenSSL REQUIRED)

# JSON parsing of the Cumulocity responses.
find_package(RapidJSON CONFIG REQUIRED)

# Structured logging.
find_package(spdlog CONFIG REQUIRED)

# Howard Hinnant's date library, fetched at configure time.
include(FetchContent)
FetchContent_Declare(date_src
  GIT_REPOSITORY https://github.com/HowardHinnant/date.git
  GIT_TAG v3.0.0 # adjust tag/branch/commit as needed
)
set(BUILD_TZ_LIB ON)
set(USE_SYSTEM_TZ_DB ON)
FetchContent_MakeAvailable(date_src)

add_executable(etl
  "src/etl.cpp")

target_include_directories(etl PRIVATE
  ${RAPIDJSON_INCLUDE_DIRS})

target_link_libraries(etl PRIVATE
  ${OPENSSL_LIBRARIES}
  core
  spdlog::spdlog
  date::date
  senshamart_client)

if(WIN32)
  # MSVC needs /bigobj for the heavily-templated asio/beast translation unit.
  target_compile_options(etl PRIVATE "/bigobj")
endif()

View file

@ -0,0 +1,604 @@
#include <boost/asio/io_service.hpp>
#include <boost/asio/deadline_timer.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>
#include <memory>
#include <optional>
#include <unordered_map>
#include <string_view>
#include <sstream>
#include <rapidjson/document.h>
#include <date/date.h>
#include <spdlog/spdlog.h>
#include <spdlog/sinks/rotating_file_sink.h>
#include <spdlog/sinks/stdout_sinks.h>
#include <senshamart/senshamart_client.hpp>
namespace {
using Clock = std::chrono::system_clock;
// Map a 6-bit value (0..63) to its character in the standard base64 alphabet
// (RFC 4648: A-Z, a-z, 0-9, '+', '/').
char to_base64_char(char c) {
  assert(c >= 0 && c < 64);
  constexpr char alphabet[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";
  return alphabet[static_cast<unsigned char>(c)];
}
// Base64-encode the bytes of `str` (RFC 4648 alphabet, '=' padding).
// Used here to build the HTTP Basic-auth credential string.
// Fix: removed the unused local `len`.
std::string to_base64(boost::string_view str) {
  std::string returning;

  char cur_sextet = 0;
  std::size_t sextet_size = 0;

  // Stream the input MSB-first, emitting one output character per 6 bits.
  for (std::size_t i = 0; i < str.size(); ++i) {
    for (std::size_t j = 0; j < CHAR_BIT; ++j) {
      cur_sextet <<= 1;
      cur_sextet |= (str[i] >> (CHAR_BIT - j - 1)) & 0x01;
      sextet_size++;
      if (sextet_size == 6) {
        returning.push_back(to_base64_char(cur_sextet));
        cur_sextet = 0;
        sextet_size = 0;
      }
    }
  }

  // Flush the trailing partial sextet and pad the output to a multiple of 4:
  // 4 leftover bits (2 input bytes) -> one '='; 2 leftover bits (1 input
  // byte) -> two '='.
  if (sextet_size != 0) {
    returning.push_back(to_base64_char(cur_sextet << (6 - sextet_size)));
    if (sextet_size == 4) {
      returning.push_back('=');
    } else if (sextet_size == 2) {
      returning.push_back('=');
      returning.push_back('=');
    }
  }

  return returning;
}
// Render a time point as an ISO-8601 UTC timestamp with millisecond
// precision, e.g. "2023-07-13T01:02:03.456Z" -- the format Cumulocity's
// dateFrom query parameter expects.
std::string as_string(Clock::time_point time) noexcept {
  const auto truncated = date::floor<std::chrono::milliseconds>(time);
  return date::format("%FT%TZ", truncated);
}
// Parse an ISO-8601 UTC timestamp of the exact shape
// "YYYY-MM-DDThh:mm:ss.mmmZ" (the shape as_string produces).
// Returns std::nullopt unless the whole string matches.
std::optional<Clock::time_point> from_string(std::string const& str) noexcept {
  int year = 0;
  unsigned int month = 0;
  unsigned int day = 0;
  unsigned int hour = 0;
  unsigned int minute = 0;
  unsigned int second = 0;
  unsigned int milliseconds = 0;
  int read_count = 0;

  // %n records how many characters were consumed; requiring it to equal the
  // string length rejects trailing garbage.
  // Fix: cast read_count before comparing with str.size() to avoid a
  // signed/unsigned comparison.
  if (sscanf(str.c_str(), "%d-%u-%uT%u:%u:%u.%3uZ%n",
    &year, &month, &day, &hour, &minute, &second, &milliseconds, &read_count) != 7
    || static_cast<std::size_t>(read_count) != str.size()) {
    return std::nullopt;
  }

  const auto y_m_d = date::year_month_day{
    date::year{ static_cast<int>(year) },
    date::month{ month },
    date::day{ day } };

  const auto time =
    std::chrono::hours{ hour }
    + std::chrono::minutes{ minute }
    + std::chrono::seconds{ second }
    + std::chrono::milliseconds{ milliseconds };

  return std::chrono::time_point_cast<std::chrono::system_clock::duration>(y_m_d.operator date::sys_days() + time);
}
// The four space-separated fields packed into a Cumulocity fragment name,
// e.g. "Milk_temperature <sensor> <connection> <interface>".
// The views alias the string passed to decode_type -- they must not outlive it.
struct Decoded_type {
  std::string_view type;
  std::string_view sensor;
  std::string_view connection_type;
  std::string_view interface;
};
// Build a string_view over the contiguous character range [begin, end).
// Fix: the previous implementation always evaluated &*begin, which is
// undefined behaviour when begin == end; an empty range now yields an
// empty view.
template<typename It1, typename It2>
std::string_view make_string_view(It1 begin, It2 end) {
  if (begin == end) {
    return std::string_view{};
  }
  return std::string_view{ &*begin, static_cast<std::size_t>(std::distance(begin, end)) };
}
std::optional<Decoded_type> decode_type(std::string_view type) {
const auto first_space = std::find(type.begin(), type.end(), ' ');
if (first_space == type.end()) {
return std::nullopt;
}
const auto second_space = std::find(first_space + 1, type.end(), ' ');
if (second_space == type.end()) {
return std::nullopt;
}
const auto third_space = std::find(second_space + 1, type.end(), ' ');
if (third_space == type.end()) {
return std::nullopt;
}
//make sure we only have 3 spaces
if (std::find(third_space + 1, type.end(), ' ') != type.end()) {
return std::nullopt;
}
Decoded_type returning;
returning.type = make_string_view(type.begin(), first_space);
returning.sensor = make_string_view(first_space + 1, second_space);
returning.connection_type = make_string_view(second_space + 1, third_space);
returning.interface = make_string_view(third_space + 1, type.end());
return returning;
}
//cumulocity stuff
// Polling / retry cadence for the Cumulocity REST connection.
const auto data_refresh_period = boost::posix_time::seconds{ 10 };
const auto reconnect_period = boost::posix_time::minutes{ 1 };
const auto resend_period = boost::posix_time::minutes{ 5 };
// One payload destined for a senshamart sensor client. The last refresh's
// batch is kept so it can be re-sent periodically (see resend_ below).
struct Send_info {
  senshamart::Client* sending_to; // not owned; points at a long-lived client
  std::string sending; // JSON body to publish
};
// Incremental-poll state for one Cumulocity IoT device.
struct Refresh_info {
  std::int64_t iot_device_id;
  Clock::time_point last_read_time; // newest measurement time seen so far
};
// Polls the Cumulocity measurement REST API over HTTPS for one IoT device
// and forwards recognised readings to the matching senshamart sensor
// clients. All I/O is asynchronous on the supplied io_service; the object
// keeps itself alive across callbacks via enable_shared_from_this, so it
// must be created with std::make_shared (see main).
struct Cumulocity_requester :
  public std::enable_shared_from_this<Cumulocity_requester> {
public:
  // Resolves `host`:443 up front and builds the HTTP Basic-auth header from
  // username/password. Throws a const char* if resolution fails.
  Cumulocity_requester(
    boost::asio::io_service& io,
    senshamart::Client external_temp_sensor,
    senshamart::Client external_humidity_sensor,
    senshamart::Client milk_temp_sensor,
    senshamart::Client milk_level_sensor,
    std::string_view host,
    boost::asio::ssl::context& ctx,
    const char* username,
    const char* password,
    spdlog::logger& undecodable,
    spdlog::logger& log,
    std::int64_t device_id) :
    sending_(),
    recv_buffer_(),
    recv_response_(),
    io_(io),
    ctx_(ctx),
    location_(),
    socket_(io, ctx),
    host_(host.data(), host.size()),
    target_endpoint_(),
    reconnect_timer_(io),
    refresh_timer_(io),
    resend_timer_(io),
    external_temp_sensor_(std::move(external_temp_sensor)),
    external_humidity_sensor_(std::move(external_humidity_sensor)),
    milk_temp_sensor_(std::move(milk_temp_sensor)),
    milk_level_sensor_(std::move(milk_level_sensor)),
    undecodable_(undecodable),
    log_(log) {
    boost::asio::ip::tcp::resolver resolver(io);
    auto resolved = resolver.resolve(host_, "443");
    if (resolved == boost::asio::ip::tcp::resolver::iterator()) {
      log_.critical("Could not resolve host\n");
      throw "Could not resolve host\n";
    }
    target_endpoint_ = *resolved;
    // HTTP Basic auth: "Basic " + base64("user:pass").
    std::string combined;
    combined.append(username);
    combined.push_back(':');
    combined.append(password);
    auth_ = "Basic ";
    auth_.append(to_base64(combined));
    refresh_info_.iot_device_id = device_id;
    refresh_info_.last_read_time = Clock::now();
  }
  // Kick off the periodic refresh and resend cycles.
  // NOTE(review): this issues the first HTTP write (do_refresh_) before any
  // connect/handshake on socket_; presumably the write fails and recovery
  // goes through reconnect_() -- confirm.
  void start() {
    refresh_();
    set_resend_timer_();
  }
private:
  // Schedule a reconnect attempt after reconnect_period.
  void set_reconnect_timer_() {
    reconnect_timer_.expires_from_now(reconnect_period);
    reconnect_timer_.async_wait([me = this->shared_from_this()](boost::system::error_code ec) {
      if (!ec) {
        me->reconnect_();
      }
    });
  }
  // Tear down the TLS stream, rebuild it, then re-connect and re-handshake.
  // On success resumes polling via do_refresh_; on failure re-arms the
  // reconnect timer.
  void reconnect_() {
    socket_.async_shutdown([me = this->shared_from_this()](boost::system::error_code ec) {
      me->socket_.next_layer().close();
      // Replace the stream wholesale: a shut-down ssl_stream cannot be reused.
      me->socket_ = boost::beast::ssl_stream<boost::beast::tcp_stream>{me->io_, me->ctx_};
      me->socket_.next_layer().async_connect(me->target_endpoint_,
        [me](boost::system::error_code ec) {
          if (ec) {
            me->set_reconnect_timer_();
          } else {
            //successful reconnect
            me->socket_.async_handshake(boost::asio::ssl::stream_base::handshake_type::client,
              [me](boost::system::error_code ec) {
                if (ec) {
                  me->set_reconnect_timer_();
                } else {
                  //successful handshake
                  me->do_refresh_();
                }
              });
          }
        });
    });
  }
  void refresh_() {
    log_.info("refreshing");
    do_refresh_();
  }
  // One poll cycle: GET all measurements for the device newer than
  // last_read_time (page size 5000), then parse the response in do_recv_.
  void do_refresh_() {
    //clear prev state
    resend_info_.clear();
    //generate request
    sending_.method(boost::beast::http::verb::get);
    sending_.version(11);
    std::stringstream building_target;
    building_target << "/measurement/measurements?source=" << refresh_info_.iot_device_id
      << "&dateFrom=" << as_string(refresh_info_.last_read_time)
      << "&pageSize=5000";
    log_.info("Refreshing with target: '{}'", building_target.str());
    sending_.target(building_target.str());
    sending_.keep_alive(true);
    sending_.set(boost::beast::http::field::host, host_);
    sending_.set(boost::beast::http::field::authorization, auth_);
    recv_buffer_.clear();
    recv_response_.body().clear();
    boost::beast::http::async_write(socket_, sending_,
      [me = this->shared_from_this()](boost::system::error_code ec, std::size_t n) {
        if (ec) {
          me->log_.warn("Couldn't write to cumulocity: {}", ec.message());
          me->reconnect_();
        } else {
          boost::beast::http::async_read(me->socket_, me->recv_buffer_, me->recv_response_,
            [me](boost::system::error_code ec, std::size_t n) {
              if (ec) {
                me->log_.warn("Couldn't read from cumulocity: {}", ec.message());
                me->reconnect_();
              } else {
                me->do_recv_();
              }
            });
        }
      });
    //send
  }
  // Decode one measurement object from the response array. Recognised
  // readings are queued into resend_info_; fragment names that do not
  // decode are logged to the undecodable logger instead.
  void recv_measurement_(rapidjson::Value const& measurement) {
    const auto now = Clock::now();
    // Readings stamped further than a week ahead are treated as bogus below.
    const auto upper_limit = now + std::chrono::hours{ 24 * 7 };
    if (!measurement.IsObject()) {
      return;
    }
    const auto found_id = measurement.FindMember("id");
    if (found_id == measurement.MemberEnd()) {
      log_.warn("Member id not found");
      return;
    }
    if(!found_id->value.IsString()) {
      log_.warn("Member id is not a string");
      return;
    }
    const int64_t id_as_int = atoll(found_id->value.GetString());
    const auto found_time = measurement.FindMember("time");
    if(found_time == measurement.MemberEnd()) {
      log_.warn("Member time is not found");
      return;
    }
    if(!found_time->value.IsString()) {
      log_.warn("Member time is not a string");
      return;
    }
    const auto time_val = from_string(found_time->value.GetString());
    if (!time_val.has_value()) {
      log_.warn("Couldn't parse time: {}", found_time->value.GetString());
      return;
    }
    //if over 48 hours into the future, ignore
    if (time_val.value() > std::chrono::system_clock::now() + std::chrono::hours{48}) {
      return;
    }
    // Advance the incremental-poll watermark.
    refresh_info_.last_read_time = std::max(refresh_info_.last_read_time, time_val.value());
    for (auto measurement_iter = measurement.MemberBegin(); measurement_iter != measurement.MemberEnd(); ++measurement_iter) {
      //if we are metadata, skip
      if (!measurement_iter->name.IsString() || !measurement_iter->value.IsObject()) {
        continue;
      }
      const std::string_view fragment_name{ measurement_iter->name.GetString(), measurement_iter->name.GetStringLength() };
      if (fragment_name == "id") {
        continue;
      } else if (fragment_name == "self") {
        continue;
      } else if (fragment_name == "time") {
        continue;
      } else if (fragment_name == "type") {
        continue;
      } else if (fragment_name == "source") {
        continue;
      }
      //we aren't the metadata, we are a fragment
      auto const& fragment = measurement_iter->value;
      // Each fragment holds named readings of the form {unit, value}.
      for (auto fragment_iter = fragment.MemberBegin(); fragment_iter != fragment.MemberEnd(); ++fragment_iter) {
        if (!fragment_iter->name.IsString()) {
          continue;
        }
        if (!fragment_iter->value.IsObject()) {
          continue;
        }
        auto const& reading = fragment_iter->value;
        const auto found_unit = reading.FindMember("unit");
        if(found_unit == reading.MemberEnd() || !found_unit->value.IsString()) {
          continue;
        }
        const auto found_value = reading.FindMember("value");
        if (found_value == reading.MemberEnd() || !found_value->value.IsNumber()) {
          continue;
        }
        const std::string_view unit{ found_unit->value.GetString(), found_unit->value.GetStringLength() };
        const double value = found_value->value.GetDouble();
        //if we can't decode fragment name, log it, and skip
        const auto decoded_type = decode_type(fragment_name);
        if (!decoded_type.has_value()) {
          undecodable_.warn("time, fragment name, unit, device id, id, value = {}, {}, {}, {}, {}, {}",
            std::string_view{found_time->value.GetString(), found_time->value.GetStringLength()},
            std::string_view{ fragment_name.data(), fragment_name.size() },
            std::string_view{ unit.data(), unit.size() },
            refresh_info_.iot_device_id,
            id_as_int,
            value);
          continue;
        }
        if(time_val.value() >= upper_limit) {
          //we have a problem with devices returning bogus timestamps in 2035, this will stop those
          continue;
        }
        // Route the reading to a sensor client by its decoded type.
        senshamart::Client* sending_to = nullptr;
        if (decoded_type->type == "Farm_condition_temperature") {
          sending_to = &this->external_temp_sensor_;
        } else if (decoded_type->type == "Milk_quantity") {
          sending_to = &this->milk_level_sensor_;
        } else if (decoded_type->type == "Milk_temperature") {
          sending_to = &this->milk_temp_sensor_;
        } else if (decoded_type->type == "Farm_condition_humidity") {
          sending_to = &this->external_humidity_sensor_;
        }
        if (sending_to == nullptr) {
          log_.info("Unused sensor value for {}", decoded_type->type);
        } else {
          // Queue a small JSON payload; the batch is sent in do_recv_ and
          // re-sent periodically by resend_.
          std::stringstream sending;
          sending <<
            "{"
            "\"time\":\"" << found_time->value.GetString() << "\","
            "\"value\":" << value <<
            "}";
          resend_info_.push_back(Send_info{
            sending_to,
            sending.str() });
        }
      }
    }
  }
  // Parse the HTTP response: on a 200 with a well-formed `measurements`
  // array, decode every element; then publish the queued payloads and
  // re-arm the refresh timer. Malformed responses are silently skipped.
  void do_recv_() {
    //read request
    do {
      if (recv_response_.result() != boost::beast::http::status::ok) {
        break;
      }
      rapidjson::Document body;
      if (body.Parse(recv_response_.body().c_str()).HasParseError()) {
        break;
      }
      if (!body.IsObject()) {
        break;
      }
      if (!body.HasMember("measurements")) {
        break;
      }
      rapidjson::Value const& measurement_array = body["measurements"];
      if (!measurement_array.IsArray()) {
        break;
      }
      rapidjson::Value::ConstArray as_array = measurement_array.GetArray();
      for (auto const& element : as_array) {
        recv_measurement_(element);
      }
    } while (false);
    for (auto const& sending : resend_info_) {
      sending.sending_to->send(sending.sending);
    }
    set_timer_();
  }
  // Schedule the next poll after data_refresh_period.
  void set_timer_() {
    refresh_timer_.expires_from_now(data_refresh_period);
    refresh_timer_.async_wait([me = this->shared_from_this()](boost::system::error_code ec) {
      if (!ec) {
        me->refresh_();
      }
    });
  }
  // Schedule the next re-publication of the last batch after resend_period.
  void set_resend_timer_() {
    resend_timer_.expires_from_now(resend_period);
    resend_timer_.async_wait([me = this->shared_from_this()](boost::system::error_code ec) {
      if (!ec) {
        me->resend_();
      }
    });
  }
  // Re-publish the most recent batch so subscribers that joined late (or
  // dropped messages) still receive data, then re-arm the timer.
  void resend_() {
    for (auto const& resending : resend_info_) {
      resending.sending_to->send(resending.sending);
    }
    set_resend_timer_();
  }
  // HTTP request/response state (reused across polls).
  boost::beast::http::request<boost::beast::http::empty_body> sending_;
  boost::beast::flat_buffer recv_buffer_;
  boost::beast::http::response<boost::beast::http::string_body> recv_response_;
  boost::asio::io_service& io_;
  boost::asio::ssl::context& ctx_;
  boost::beast::tcp_stream::endpoint_type location_;
  boost::beast::ssl_stream<boost::beast::tcp_stream> socket_;
  boost::asio::ip::tcp::endpoint target_endpoint_;
  std::string host_;
  boost::asio::deadline_timer reconnect_timer_;
  boost::asio::deadline_timer refresh_timer_;
  boost::asio::deadline_timer resend_timer_;
  std::vector<Send_info> resend_info_; // last decoded batch, for resend_
  std::string auth_; // precomputed "Basic <base64>" header value
  Refresh_info refresh_info_;
  // Destination senshamart clients, one per routed measurement type.
  senshamart::Client external_temp_sensor_;
  senshamart::Client external_humidity_sensor_;
  senshamart::Client milk_temp_sensor_;
  senshamart::Client milk_level_sensor_;
  spdlog::logger& undecodable_; // fragment names that failed decode_type
  spdlog::logger& log_;
};
}
// ETL entry point: wires four senshamart sensor clients and the Cumulocity
// poller together, then runs the asio event loop until the process is killed.
// Returns -1 on bad arguments.
int main(int argc, const char** argv) {
  if (argc < 12) {
    fprintf(stderr, "Expected"
      " %s"
      " <cumulocity host>"
      " <cumulocity username>"
      " <cumulocity password>"
      " <broker location>"
      " <cumulocity device id>"
      " <external temp sensor name>"
      " <external humidity sensor name>"
      " <milk temp sensor name>"
      " <milk level sensor name>"
      " <log location>"
      " <undecoded log>\n", argv[0]);
    return -1;
  }
  const char* host = argv[1]; //"bega.apj.cumulocity.com";//
  const char* username = argv[2]; //"jkaraboticmilovac@swin.edu.au";//
  const char* password = argv[3]; //"swin.iotLab";//
  const char* broker_location = argv[4]; //"tcp://127.0.0.1:8001";//
  const char* cumulocity_device_id_str = argv[5];
  const char* external_temp_sensor_name = argv[6];
  const char* external_humidity_sensor_name = argv[7];
  const char* milk_temp_sensor_name = argv[8];
  const char* milk_level_sensor_name = argv[9];
  const char* log_location = argv[10]; //"etl_log";//
  const char* undecodable_location = argv[11]; //"undecodable.log";//
  // Rotating logs (4 files x 4 MiB). Undecodable fragments also go to the
  // main log and stderr so they are visible during demos.
  const auto undecodable_file_sink = std::make_shared<spdlog::sinks::rotating_file_sink_mt>(
    undecodable_location, 4 * 1024 * 1024, 4, true);
  const auto log_file_sink = std::make_shared<spdlog::sinks::rotating_file_sink_mt>(
    log_location, 4 * 1024 * 1024, 4, true);
  const auto stderr_sink = std::make_shared<spdlog::sinks::stderr_sink_mt>();
  spdlog::logger undecodable{ "undecodable", { undecodable_file_sink, log_file_sink, stderr_sink } };
  spdlog::logger log{ "etl", { log_file_sink, stderr_sink } };
  log.info("initing");
  //milk supply chain monitoring system side
  senshamart::Client external_temp_sensor{broker_location, external_temp_sensor_name};
  senshamart::Client external_humidity_sensor{broker_location, external_humidity_sensor_name};
  senshamart::Client milk_temp_sensor{broker_location, milk_temp_sensor_name};
  senshamart::Client milk_level_sensor{broker_location, milk_level_sensor_name};
  //cumulocity side stuff
  boost::asio::io_service io;
  // NOTE(review): the verify callback accepts every certificate, disabling
  // TLS peer verification entirely -- confirm this is intended (demo only).
  boost::asio::ssl::context ctx(boost::asio::ssl::context_base::tls_client);
  ctx.set_verify_callback([](auto&&...) {return true; });
  // NOTE(review): strtoll returns 0 on parse failure, so a malformed device
  // id silently polls device 0 -- confirm whether that should be an error.
  const std::int64_t device_id = std::strtoll(cumulocity_device_id_str, nullptr, 10);
  const auto cumulocity_requester = std::make_shared<Cumulocity_requester>(
    io,
    std::move(external_temp_sensor),
    std::move(external_humidity_sensor),
    std::move(milk_temp_sensor),
    std::move(milk_level_sensor),
    host,
    ctx,
    username,
    password,
    undecodable,
    log,
    device_id);
  log.info("starting");
  cumulocity_requester->start();
  io.run();
  return 0;
}

View file

@ -0,0 +1,15 @@
cmake_minimum_required(VERSION 3.8)

# Paho MQTT C++ is the only direct dependency of the client library.
find_package(PahoMqttCpp CONFIG REQUIRED)

add_library(senshamart_client STATIC
  "src/senshamart_client.cpp")

# Consumers see only the public headers; the MQTT dependency stays private.
target_include_directories(senshamart_client PUBLIC
  "public_include")
target_link_libraries(senshamart_client PRIVATE
  PahoMqttCpp::paho-mqttpp3)
target_compile_features(senshamart_client PUBLIC
  cxx_std_17)

View file

@ -0,0 +1,22 @@
#include <string>
#include <memory>
namespace senshamart {
// Move-only pimpl wrapper over an MQTT publisher that feeds one senshamart
// sensor. The implementation type is hidden behind a void pointer so this
// header needs no MQTT includes.
class Client final {
public:
  // broker_endpoint: MQTT broker URI (e.g. "tcp://host:port").
  // sensor_name: the sensor this client publishes for.
  Client(std::string broker_endpoint, std::string sensor_name);
  Client(Client const&) = delete;
  Client(Client&&) = default;
  Client& operator=(Client const&) = delete;
  Client& operator=(Client&&) = default;
  // Publish one payload for this sensor (fire-and-forget).
  void send(std::string);
private:
  // Custom deleter so unique_ptr<void> can destroy the hidden impl type.
  struct Pimpl_deleter_ {
    void operator()(void*) const noexcept;
  };
  std::unique_ptr<void, Pimpl_deleter_> pimpl_;
};
}

View file

@ -0,0 +1,86 @@
#include <senshamart/senshamart_client.hpp>
#include <mqtt/async_client.h>
namespace {
// Concrete implementation behind senshamart::Client's pimpl: an async Paho
// MQTT client that publishes sensor payloads to "in/<sensor_name>".
// NOTE(review): this class derives from mqtt::callback but is never
// registered with the client via set_callback, so the overrides below
// appear to be dead code -- confirm.
class Client_impl final : mqtt::callback {
public:
  Client_impl(std::string broker_endpoint, std::string sensor_name) :
    mqtt_client_(std::move(broker_endpoint), sensor_name) {
    auto connOpts = mqtt::connect_options_builder()
      .clean_session()
      .keep_alive_interval(std::chrono::seconds(30))
      .automatic_reconnect(std::chrono::seconds(2), std::chrono::seconds(30))
      .max_inflight(2)
      .finalize();
    // NOTE(review): the connect token is not waited on, so send() may be
    // called before the connection completes -- presumably the catch in
    // send() absorbs those failures; confirm.
    mqtt_client_.connect(connOpts);
    video_topic_ = "in/" + sensor_name;
  }
  // Publish one payload at QoS 0, not retained; failures are best-effort
  // (logged only in debug builds).
  void send(std::string data) {
    auto msg = mqtt::make_message(video_topic_, std::move(data));
    msg->set_qos(0);
    msg->set_retained(false);
    try {
      mqtt_client_.publish(std::move(msg));
    } catch (mqtt::exception const& ex) {
#if _DEBUG
      fprintf(stderr, "Failed send: %s\n", ex.to_string().c_str());
#endif
    }
  }
  // Blocking disconnect; also invoked from the destructor.
  void close() {
    mqtt_client_.disconnect()->wait();
  }
  ~Client_impl() {
    close();
  }
private:
  // mqtt::callback overrides -- debug-build logging only.
  void connected(std::string const& cause) override {
#if _DEBUG
    fprintf(stderr, "Connected\n");
    if (!cause.empty()) {
      fprintf(stderr, "\tcause: %s\n", cause.c_str());
    }
#endif
  }
  void connection_lost(const std::string& cause) override {
#if _DEBUG
    fprintf(stderr, "Connection lost\n");
    if (!cause.empty()) {
      fprintf(stderr, "\tcause: %s\n", cause.c_str());
    }
#endif
  }
  void delivery_complete(mqtt::delivery_token_ptr tok) override {
#if _DEBUG
    fprintf(stderr, "Delivery complete for token: %d\n",
      (tok ? tok->get_message_id() : -1));
#endif
  }
  mqtt::async_client mqtt_client_;
  std::string video_topic_; // "in/<sensor_name>", computed once at construction
};
}
// Construct the hidden Client_impl and hand ownership to the type-erased pimpl.
senshamart::Client::Client(std::string broker_endpoint, std::string sensor_name) {
  pimpl_.reset(new Client_impl(std::move(broker_endpoint), std::move(sensor_name)));
}
void senshamart::Client::send(std::string sending) {
static_cast<Client_impl*>(pimpl_.get())->send(std::move(sending));
}
// Deleter for the type-erased pimpl: restore the concrete type before
// deleting so Client_impl's destructor (which disconnects) actually runs.
void senshamart::Client::Pimpl_deleter_::operator()(void* p) const noexcept {
  delete static_cast<Client_impl*>(p);
}

View file

@ -0,0 +1,14 @@
[Unit]
Description=Cumulocity etl
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=1
User=ubuntu
ExecStart=demo/demo/milk_client/etl bega.apj.cumulocity.com jkaraboticmilovac@swin.edu.au swin.iotLab tcp://136.186.108.94:5004 15396922 MilkExternalTemp1 MilkExternalHumidity1 MilkMilkTemp1 MilkMilkLevel1 etl_log undecodable.log
WorkingDirectory=/home/ubuntu
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,2 @@
@echo off
rem Install Node.js 20.x on a remote Ubuntu host over SSH (via plink).
rem %1 = private key file for plink, %2 = host address.
plink -batch -i %1 ubuntu@%2 "curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash - && sudo apt-get install -y nodejs"

Some files were not shown because too many files have changed in this diff Show more