Cassandra Java Batch Statement Example
Best Java code snippets using com.datastax.driver.core.BatchStatement (Showing top 20 results out of 576)
Refine search
/**
 * Removes a task row and decrements the owning workflow's task counter in one
 * batch, so the two writes are applied together.
 *
 * @param task the task to remove; its workflow instance id must parse as a UUID
 * @return true if Cassandra applied the batch
 * @throws ApplicationException (BACKEND_ERROR) wrapping any failure
 */
private boolean removeTask(Task task) {
    try {
        WorkflowMetadata workflowMetadata = getWorkflowMetadata(task.getWorkflowInstanceId());
        int totalTasks = workflowMetadata.getTotalTasks();
        removeTaskLookup(task);
        recordCassandraDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType());
        // Delete the task and update the workflow's task count atomically.
        BatchStatement batchStatement = new BatchStatement();
        batchStatement.add(deleteTaskStatement.bind(
                UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID, task.getTaskId()));
        batchStatement.add(updateTotalTasksStatement.bind(
                totalTasks - 1, UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID));
        ResultSet resultSet = session.execute(batchStatement);
        return resultSet.wasApplied();
    } catch (Exception e) {
        Monitors.error(CLASS_NAME, "removeTask");
        String errorMsg = String.format("Failed to remove task: %s", task.getTaskId());
        LOGGER.error(errorMsg, e);
        // Preserve the original cause instead of discarding it on rethrow.
        throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, errorMsg, e);
    }
}
/** Applies the configured write consistency level to the batch and executes it synchronously. */
protected void executeBatch(BatchStatement batch) {
    LOG.debug("Execute cassandra batch {}", batch);
    batch.setConsistencyLevel(getWriteConsistencyLevel());
    final ResultSet outcome = getSession().execute(batch);
    LOG.debug("Executed batch {}", outcome);
}
// NOTE(review): this is an excerpt with elided context — `batchStatement`,
// `insertQuery`, `recordContentMap`, `connectionSession`, `stopWatch`,
// `fieldName`, `batchStatementType`, `serialConsistencyLevel` and
// `cassandraTable` are declared in surrounding code not shown here.
Record record;
// Batch type and serial consistency level are parsed from configuration strings.
batchStatement = new BatchStatement(BatchStatement.Type.valueOf(batchStatementType));
batchStatement.setSerialConsistencyLevel(ConsistencyLevel.valueOf(serialConsistencyLevel));
insertQuery.value(fieldName, recordContentMap.get(fieldName));
batchStatement.add(insertQuery);
connectionSession.execute(batchStatement);
batchStatement.clear();
// Flush any leftover statements that did not fill a whole batch.
if (batchStatement.size() != 0) {
    connectionSession.execute(batchStatement);
    batchStatement.clear();
    stopWatch.stop();
    long duration = stopWatch.getDuration(TimeUnit.MILLISECONDS);
    // Provenance URI built from the connected cluster's name and the target table.
    String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName() + "." + cassandraTable;
@Override public List<Statement> map(Map<String, Object> conf, Session session, ITuple tuple) { final BatchStatement batch = new BatchStatement ( this .type); for (CQLStatementTupleMapper m : mappers) batch. addAll (m.map(conf, session, tuple)); return Arrays.asList((Statement) batch); } }
/**
 * Packs a list of statement/tuple pairs into one UNLOGGED batch, keeping the
 * originating tuples alongside it so they can be acked or failed later.
 */
@Override
public PairBatchStatementTuples apply(List<PairStatementTuple> l) {
    final BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
    final List<Tuple> inputs = new LinkedList<>();
    for (final PairStatementTuple pair : l) {
        inputs.add(pair.getTuple());
        batch.add(pair.getStatement());
    }
    return new PairBatchStatementTuples(inputs, batch);
}
});
/**
 * Writes the statements derived from the given tuples: as a single batch when a
 * batching type is configured, otherwise one statement at a time.
 *
 * @param tuples    tuples to map to CQL statements
 * @param collector collector used to report a failure before rethrowing
 * @throws FailedException so Trident replays the batch on any write error
 */
public void updateState(List<TridentTuple> tuples, final TridentCollector collector) {
    List<Statement> statements = new ArrayList<>();
    for (TridentTuple tuple : tuples) {
        statements.addAll(options.cqlStatementTupleMapper.map(conf, session, tuple));
    }
    try {
        if (options.batchingType != null) {
            BatchStatement batchStatement = new BatchStatement(options.batchingType);
            batchStatement.addAll(statements);
            session.execute(batchStatement);
        } else {
            for (Statement statement : statements) {
                session.execute(statement);
            }
        }
    } catch (Exception e) {
        // Pass the exception to the logger so the stack trace is recorded,
        // instead of logging the bare message only.
        LOG.warn("Batch write operation is failed.", e);
        collector.reportError(e);
        throw new FailedException(e);
    }
}
/** Verifies CAS semantics for batches: the first run applies, a re-run does not. */
@Test(groups = "short")
@CassandraVersion(value = "2.0.9", description = "This will only work with C* 2.0.9 (CASSANDRA-7337)")
public void casBatchTest() {
    PreparedStatement insertIfAbsent =
        session().prepare("INSERT INTO test (k, v) VALUES (?, ?) IF NOT EXISTS");
    BatchStatement casBatch = new BatchStatement();
    casBatch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0));
    casBatch.add(insertIfAbsent.bind("key1", 1));
    casBatch.add(insertIfAbsent.bind("key1", 2));
    assertEquals(3, casBatch.size());

    // First execution: the conditional inserts succeed.
    Row first = session().execute(casBatch).one();
    assertTrue(!first.isNull("[applied]"));
    assertEquals(first.getBool("[applied]"), true);

    // Second execution: the IF NOT EXISTS conditions now fail.
    Row second = session().execute(casBatch).one();
    assertTrue(!second.isNull("[applied]"));
    assertEquals(second.getBool("[applied]"), false);
}
}
/** The batch default timestamp applies only to inner statements without their own USING TIMESTAMP. */
@Test(groups = "short")
public void should_apply_statement_timestamp_only_to_batched_queries_without_timestamp() {
    BatchStatement batch = new BatchStatement();
    batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)"));
    batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20"));
    batch.setDefaultTimestamp(10);
    session().execute(batch);

    long timestampWithoutUsing =
        session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0);
    long timestampWithUsing =
        session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0);
    assertEquals(timestampWithoutUsing, 10);
    assertEquals(timestampWithUsing, 20);
}
/** Batch statements do not support paging, so setting a paging state must fail fast. */
@Test(groups = "unit", expectedExceptions = {UnsupportedOperationException.class})
public void should_fail_when_setting_paging_state_on_batch_statement() {
    final PagingState pagingState = PagingState.fromString("00000000");
    new BatchStatement().setPagingState(pagingState);
}
/**
 * Deletes all shard rows for a queue in a region, covering both DEFAULT and
 * INFLIGHT shard types, as a single batch.
 *
 * @param queueName queue whose shards are removed
 * @param region    region the shards belong to
 */
@Override
public void deleteAllShards(String queueName, String region) {
    BatchStatement batch = new BatchStatement();
    Shard.Type[] shardTypes = new Shard.Type[]{Shard.Type.DEFAULT, Shard.Type.INFLIGHT};
    for (Shard.Type shardType : shardTypes) {
        Statement delete = QueryBuilder.delete().from(getTableName(shardType))
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, queueName))
            .and(QueryBuilder.eq(COLUMN_REGION, region));
        // Log the per-shard delete statement; the previous code passed the
        // still-empty batch here, which never matched the "query" placeholder.
        logger.trace("Removing shards for queue {} region {} shardType {} query {}",
            queueName, region, shardType, delete.toString());
        batch.add(delete);
    }
    cassandraClient.getQueueMessageSession().execute(batch);
}
/**
 * Streams rows into Cassandra, flushing the insert batch whenever it reaches
 * batchRowsCount statements, with a final flush for any remainder.
 *
 * @param rows iterator of rows; each row must contain exactly columnsCount values
 */
public void load(Iterator<List<Object>> rows) {
    PreparedStatement statement = session.prepare(insertQuery);
    BatchStatement batch = createBatchStatement();
    while (rows.hasNext()) {
        // Flush before adding so a batch never exceeds batchRowsCount statements.
        if (batch.size() >= batchRowsCount) {
            session.execute(batch);
            batch = createBatchStatement();
        }
        List<Object> row = rows.next();
        // Guava's checkState substitutes only %s placeholders; the original %d
        // would have been printed literally instead of the values.
        checkState(row.size() == columnsCount,
            "values count in a row is expected to be %s, but found: %s", columnsCount, row.size());
        batch.add(statement.bind(row.toArray()));
    }
    // Flush the final partial batch, if any.
    if (batch.size() > 0) {
        session.execute(batch);
    }
}
/** Executes the accumulated batch, empties it for reuse, and returns the result set. */
@Override
public ResultSet commit() {
    final ResultSet result = this.session.execute(this.batch);
    this.batch.clear();
    return result;
}
/** Runs the batch synchronously; an empty batch is a no-op. */
private void executeBatch(BatchStatement batchState) {
    if (batchState.size() == 0) {
        return;
    }
    m_logger.debug("Executing synchronous batch with {} statements", batchState.size());
    m_dbservice.getSession().execute(batchState);
}
/**
 * Buffers one person-register insert into the shared batch and flushes the
 * batch once NUM_BATCH_STATEMENT statements have accumulated.
 *
 * @param args positional values used to build the PersonRegister instance
 */
public void write(Object[] args) {
    PersonRegister person = PersonRegister.getInstance(args);
    Statement query = QueryBuilder.insertInto("PersonRegister")
        .value("MSDIN", person.getMsdin())
        .value("firstName", person.getFirstName())
        .value("lastName", person.getLastName())
        .value("birthDate", person.getBirthDate())
        .value("gender", person.getGender())
        .setConsistencyLevel(ConsistencyLevel.QUORUM); // stray duplicate ';' removed
    batchStatement.add(query);
    numStatement++;
    // Flush once the configured batch size is reached, then reset the counters.
    if (numStatement == NUM_BATCH_STATEMENT) {
        LOGGER.info("Execute batch query PersonRegister: " + numIteration++);
        session.execute(batchStatement);
        batchStatement.clear();
        numStatement = 0;
    }
}
/**
 * Applies all mutations for all tables as one LOGGED (atomic) Cassandra batch,
 * using a single commit timestamp derived from the transaction.
 *
 * NOTE(review): `Iterator`, `Option` and `Future` here are vavr types (lazy
 * iterators / functional futures), not java.util — TODO confirm against imports.
 *
 * @param mutations table name -> (row key -> column mutations) to apply
 * @param txh       transaction providing timestamps and consistency level
 * @throws BackendException if the batch execution fails
 */
private void mutateManyLogged(final Map<String, Map<StaticBuffer, KCVMutation>> mutations, final StoreTransaction txh) throws BackendException {
    final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
    final BatchStatement batchStatement = new BatchStatement(Type.LOGGED);
    batchStatement.setConsistencyLevel(getTransaction(txh).getWriteConsistencyLevel());
    // Flatten the nested table -> key -> mutation structure into one stream of statements.
    batchStatement.addAll(Iterator.ofAll(mutations.entrySet()).flatMap(tableNameAndMutations -> {
        final String tableName = tableNameAndMutations.getKey();
        final Map<StaticBuffer, KCVMutation> tableMutations = tableNameAndMutations.getValue();
        // Fail loudly if a mutation targets a store that was never opened.
        final CQLKeyColumnValueStore columnValueStore = Option.of(this.openStores.get(tableName))
            .getOrElseThrow(() -> new IllegalStateException("Store cannot be found: " + tableName));
        return Iterator.ofAll(tableMutations.entrySet()).flatMap(keyAndMutations -> {
            final StaticBuffer key = keyAndMutations.getKey();
            final KCVMutation keyMutations = keyAndMutations.getValue();
            // Deletions use the deletion timestamp, additions the (later) addition
            // timestamp, so an add of a just-deleted column wins within the batch.
            final Iterator<Statement> deletions = Iterator.of(commitTime.getDeletionTime(this.times))
                .flatMap(deleteTime -> Iterator.ofAll(keyMutations.getDeletions()).map(deletion -> columnValueStore.deleteColumn(key, deletion, deleteTime)));
            final Iterator<Statement> additions = Iterator.of(commitTime.getAdditionTime(this.times))
                .flatMap(addTime -> Iterator.ofAll(keyMutations.getAdditions()).map(addition -> columnValueStore.insertColumn(key, addition, addTime)));
            return Iterator.concat(deletions, additions);
        });
    }));
    // Execute asynchronously but block until completion, mapping any failure
    // to the backend exception hierarchy.
    final Future<ResultSet> result = Future.fromJavaFuture(this.executorService, this.session.executeAsync(batchStatement));
    result.await();
    if (result.isFailure()) {
        throw EXCEPTION_MAPPER.apply(result.getCause().get());
    }
    // Optional settle delay after writes (store-time vs wall-clock skew).
    sleepAfterWrite(txh, commitTime);
}
/**
 * Registers a row key for a metric: remembers it locally and queues two
 * idempotent inserts (time index and full row-key record) on the shared batch.
 *
 * @param metricName metric the row key belongs to
 * @param rowKey     row key carrying timestamp, data type and tags
 * @param rowKeyTtl  TTL in seconds applied to both inserted rows
 */
public void addRowKey(String metricName, DataPointsRowKey rowKey, int rowKeyTtl) {
    m_newRowKeys.add(rowKey);

    // NOTE(review): the original allocated a ByteBuffer and wrote the timestamp
    // into it but never read it; that dead code has been removed.

    Statement bs = m_clusterConnection.psRowKeyTimeInsert.bind()
        .setString(0, metricName)
        .setTimestamp(1, new Date(rowKey.getTimestamp()))
        .setInt(2, rowKeyTtl)
        .setIdempotent(true);
    bs.setConsistencyLevel(m_consistencyLevel);
    rowKeyBatch.add(bs);

    bs = m_clusterConnection.psRowKeyInsert.bind()
        .setString(0, metricName)
        .setTimestamp(1, new Date(rowKey.getTimestamp()))
        .setString(2, rowKey.getDataType())
        .setMap(3, rowKey.getTags())
        .setInt(4, rowKeyTtl)
        .setIdempotent(true);
    bs.setConsistencyLevel(m_consistencyLevel);
    rowKeyBatch.add(bs);
}
// NOTE(review): excerpt with elided context — `mutation`, `prepStatement`, `ses`,
// `session()` and `tuneStatementExecutionOptions` are defined in surrounding code.
BatchStatement batch = new BatchStatement();
batch.add(mutation.bindStatement(prepStatement));
// The size guard is redundant as shown (a statement was just added) — presumably
// a remnant of a loop in the original context; confirm against the full source.
if (batch.size() > 0) {
    ses = session();
    ses.execute(tuneStatementExecutionOptions(batch));
/**
 * Updates the metrics index for every metric in the stream.
 *
 * Binds one index-update statement per metric id, groups the bound statements
 * into batches via BatchStatementTransformer, executes each batch, and emits
 * the number of statements in each executed batch.
 *
 * @param metrics stream of metrics whose ids are written to the index
 * @return one emitted Integer per executed batch (its statement count)
 */
@Override
public <T> Observable<Integer> updateMetricsIndex(Observable<Metric<T>> metrics) {
    return metrics.map(Metric::getMetricId)
        // `updateMetricsIndex` here is the prepared statement field, not this method.
        .map(id -> updateMetricsIndex.bind(id.getTenantId(), id.getType().getCode(), id.getName()))
        .compose(new BatchStatementTransformer())
        .flatMap(batch -> rxSession.execute(batch).map(resultSet -> batch.size()));
}
/**
 * Executes every statement accumulated in the batch individually and
 * asynchronously, bounding the number of in-flight requests, then clears the
 * batch. Blocks until all statements have completed.
 *
 * NOTE(review): despite issuing the statements from a BatchStatement, this
 * sends them as independent queries, so atomicity of the batch is NOT preserved.
 */
public void commitAsync() {
    Collection<Statement> statements = this.batch.getStatements();
    int count = 0;
    // Window size: at most `processors` + 1 requests are in flight at once.
    // 1023 appears to be an arbitrary in-flight cap — confirm against driver limits.
    int processors = Math.min(statements.size(), 1023);
    List<ResultSetFuture> results = new ArrayList<>(processors + 1);
    for (Statement s : statements) {
        ResultSetFuture future = this.session.executeAsync(s);
        results.add(future);
        // When the window fills, drain it: wait for every outstanding future.
        if (++count > processors) {
            results.forEach(ResultSetFuture::getUninterruptibly);
            results.clear();
            count = 0;
        }
    }
    // Drain whatever remains from the last (partial) window.
    for (ResultSetFuture future : results) {
        future.getUninterruptibly();
    }
    this.batch.clear();
}
/**
 * Builds the batch statement for the pending column-list mutations and stamps
 * it with the mapped consistency level. Returns an empty UNLOGGED batch when
 * there are no mutations.
 */
private BatchStatement getCachedPreparedStatement() {
    final List<CqlColumnListMutationImpl<?, ?>> colListMutations = getColumnMutations();
    if (colListMutations == null || colListMutations.size() == 0) {
        return new BatchStatement(Type.UNLOGGED);
    }

    // Pick the batch type from the first mutation: counter updates need a
    // COUNTER batch, atomic writes a LOGGED one, everything else UNLOGGED.
    final ColListMutationType mutationType = colListMutations.get(0).getType();
    Type batchType = Type.UNLOGGED;
    if (mutationType == ColListMutationType.CounterColumnsUpdate) {
        batchType = Type.COUNTER;
    } else if (useAtomicBatch()) {
        batchType = Type.LOGGED;
    }

    final BatchStatement batch = new BatchStatement(batchType);
    for (CqlColumnListMutationImpl<?, ?> colListMutation : colListMutations) {
        CFMutationQueryGen queryGen = colListMutation.getMutationQueryGen();
        queryGen.addColumnListMutationToBatch(batch, colListMutation, useCaching);
    }
    batch.setConsistencyLevel(ConsistencyLevelMapping.getCL(this.getConsistencyLevel()));
    return batch;
}
Source: https://www.tabnine.com/code/java/classes/com.datastax.driver.core.BatchStatement
0 Responses to "Cassandra Java Batch Statement Example"
Postar um comentário