If you’re using Azure Table Storage then it’s often more efficient to perform operations such as insert, update and delete in batches rather than making separate calls for each individual row.
There can be up to 100 entities in each batch, and all entities in a batch need to have the same partition key. Below is a function which accepts a list of entities and splits them by partition key into lists of the desired size to help with batching.
/// <summary>
/// Splits <paramref name="lookupEntities"/> into lists suitable for Azure Table
/// Storage batch operations: every entity in a returned list shares the same
/// <c>PartitionKey</c>, and no list contains more than <paramref name="chunkSize"/>
/// entities (Table Storage batches allow at most 100 entities per partition).
/// </summary>
/// <param name="lookupEntities">Entities to batch; may span multiple partition keys.</param>
/// <param name="chunkSize">Maximum number of entities per returned list; must be positive.</param>
/// <returns>Lists of entities, each homogeneous in partition key and at most <paramref name="chunkSize"/> long.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="lookupEntities"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="chunkSize"/> is not positive.</exception>
private List<List<LookupEntity>> ChunkProductEntities(List<LookupEntity> lookupEntities, int chunkSize)
{
    if (lookupEntities == null)
    {
        throw new ArgumentNullException(nameof(lookupEntities));
    }

    // Guard explicitly: chunkSize == 0 would otherwise surface as an opaque
    // DivideByZeroException inside the LINQ pipeline.
    if (chunkSize <= 0)
    {
        throw new ArgumentOutOfRangeException(nameof(chunkSize), "Chunk size must be positive.");
    }

    return lookupEntities
        .GroupBy(entity => entity.PartitionKey)
        // Within each partition, bucket entities by index / chunkSize so each
        // bucket holds at most chunkSize consecutive entities, then flatten
        // the per-partition buckets into a single list of chunks.
        .SelectMany(partition => partition
            .Select((entity, index) => new { Index = index, Value = entity })
            .GroupBy(item => item.Index / chunkSize)
            .Select(bucket => bucket.Select(item => item.Value).ToList()))
        .ToList();
}
0 Comments