I have been trying a lot of different GitHub project samples and have read the Bigtable API guide several times, and I don't understand why it won't let me set multiple cells in a row. The examples shown only ever write a single value per row.
I have also used the cbt tool to check whether the column families I added are present in the table, and they are, but when I run the count command I don't see any entries.
I have used the mutate_rows command on the table and the commit command on the row, but neither adds the row. I also realized that the row commit command is really just:
table.mutate_rows([row])
So I just can't figure out what I'm doing wrong.
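For reference, here is a minimal sketch of the two write paths as I understand them; the project, instance, table, column family, qualifier and row-key names ('my-project', 'my-instance', 'my-table', 'states', etc.) are placeholders, not my real IDs:

import datetime

from google.cloud import bigtable

# Placeholder IDs -- replace with real project / instance / table names.
# Assumes a 'states' column family already exists on the table.
client = bigtable.Client(project='my-project', admin=True)
table = client.instance('my-instance').table('my-table')

# Path 1: several cells set on one DirectRow, then a single commit().
row = table.row(b'row-key-1')
row.set_cell('states', b'col-a', b'value-a',
             timestamp=datetime.datetime.utcnow())
row.set_cell('states', b'col-b', b'value-b',
             timestamp=datetime.datetime.utcnow())
row.commit()

# Path 2: the bulk call that commit() wraps.
row2 = table.row(b'row-key-2')
row2.set_cell('states', b'col-a', b'value-a')
table.mutate_rows([row2])

My full function is below: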
import base64
import json
import ast
import datetime

from google.cloud import bigtable
from google.cloud.bigtable import column_family
from google.cloud.bigtable import row_filters


def function(event, context):
    data = base64.b64decode(event['data']).decode('utf-8')
    data = ast.literal_eval(data)
    print(type(data))
    print(data)

    # Create a Cloud Bigtable client.
    client = bigtable.Client(project=project_id, admin=True)

    # Connect to an existing Cloud Bigtable instance.
    instance = client.instance(instance_id)

    print('opening the {} table.'.format(table_id))
    table = instance.table(table_id)

    # [START writing_rows]
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    column_family_id = 'states'.encode('utf-8')
    column_families = {column_family_id: max_versions_rule}
    if not table.exists():
        table.create(column_families=column_families)
    else:
        print("Table {} already exists.".format(table_id))

    row_key = (data['serial_num'] + str(datetime.datetime.utcnow())).encode('utf-8')
    row_obj = table.row(row_key)
    for key, value in data.items():
        row_obj.set_cell(
            column_family_id,
            str(key).encode('utf-8'),
            str(value).encode('utf-8'),
            timestamp=datetime.datetime.utcnow()
        )
    print(row_obj)
    print(str(row_obj))
    print(row_obj.table)
    print(row_obj.row_key)
    row_obj.commit()
    '''
    table.mutate_rows([row_obj])
    '''
    print('Inserted/updated data.')
    # [END writing_rows]

    # [START creating_a_filter]
    # Create a filter to only retrieve the most recent version of the cell
    # for each column across entire row.
    row_filter = row_filters.CellsColumnLimitFilter(1)
    # [END creating_a_filter]

    # [START read_rows]
    row = table.read_row(row_key, row_filter)
    print(row)
    for key, value in data.items():
        cell_values = row.cells[column_family_id][column][0]
        print('{} = {} should be {}'.format(key, cell_values, value))
    # [END read_rows]
Answer 0 (score: 1)
This is the solution I ended up with:
import base64
import json
import ast
import datetime

from google.cloud import bigtable
from google.cloud.bigtable import column_family
from google.cloud.bigtable import row_filters


def hello_pubsub(event, context):
    data = base64.b64decode(event['data']).decode('utf-8')
    data = ast.literal_eval(data)
    print(type(data))
    print(data)

    # Create a Cloud Bigtable client.
    client = bigtable.Client(project=project_id, admin=True)

    # Connect to an existing Cloud Bigtable instance.
    instance = client.instance(instance_id)

    print('opening the {} table.'.format(table_id))
    table = instance.table(table_id)

    # [START writing_rows]
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    column_family_id = 'state'
    column_families = {column_family_id: max_versions_rule}
    if not table.exists():
        table.create(column_families=column_families)
    else:
        print("Table {} already exists.".format(table_id))

    row_key = (data['serial_num'] + " " + str(datetime.datetime.utcnow())).encode('utf-8')

    rows = []
    for key, value in data.items():
        row = table.row(row_key)
        row.set_cell(column_family_id,
                     str(key).encode('utf-8'),
                     str(value),
                     timestamp=datetime.datetime.utcnow())
        rows.append(row)
    table.mutate_rows(rows)
    print('Inserted/updated data.')
    # [END writing_rows]

    # [START creating_a_filter]
    # Create a filter to only retrieve the most recent version of the cell
    # for each column across entire row.
    row_filter = row_filters.CellsColumnLimitFilter(1)
    # [END creating_a_filter]

    # [START read_rows]
    partial_rows = table.read_row(row_key, row_filter)
    print(partial_rows.cells)
    for key, value in data.items():
        cell_value = partial_rows.cell_value(column_family_id, str(key).encode('utf-8'))
        print('{} = {} should be {}'.format(key, cell_value, value))
    # [END read_rows]
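A note on the mutate_rows call above: it returns one status per submitted row, so a hypothetical check like the sketch below (not part of the solution itself) could replace the bare table.mutate_rows(rows) call to confirm whether the writes actually landed:

    # Hypothetical check, not in the code above: inspect the per-row
    # statuses returned by mutate_rows (code 0 means the mutation applied).
    statuses = table.mutate_rows(rows)
    failed = [status for status in statuses if status.code != 0]
    if failed:
        print('{} mutation(s) failed: {}'.format(len(failed), failed))
    else:
        print('all {} mutation(s) applied'.format(len(statuses)))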